# StyleGAN-NADA / app.py
import os
import shutil
import tempfile
from argparse import Namespace
from pathlib import Path

import dlib
import gradio as gr
import numpy as np
import torch
import torchvision.transforms as transforms
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import utils

from e4e.models.psp import pSp
from generate_videos import generate_frames, video_from_interpolations, vid_to_gif
from model.sg2_model import Generator
from util import *
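# Gradio demo for StyleGAN-NADA (SIGGRAPH 2022): invert a face photo into
# StyleGAN2's latent space with e4e, then re-synthesize it with generators
# that were adapted to new domains via CLIP guidance.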
model_dir = "models"
os.makedirs(model_dir, exist_ok=True)
model_repos = {"e4e": ("akhaliq/JoJoGAN_e4e_ffhq_encode", "e4e_ffhq_encode.pt"),
"dlib": ("akhaliq/jojogan_dlib", "shape_predictor_68_face_landmarks.dat"),
"base": ("akhaliq/jojogan-stylegan2-ffhq-config-f", "stylegan2-ffhq-config-f.pt"),
"anime": ("rinong/stylegan-nada-models", "anime.pt"),
"joker": ("rinong/stylegan-nada-models", "joker.pt"),
# "simpson": ("rinong/stylegan-nada-models", "simpson.pt"),
# "ssj": ("rinong/stylegan-nada-models", "ssj.pt"),
# "white_walker": ("rinong/stylegan-nada-models", "white_walker.pt"),
# "zuckerberg": ("rinong/stylegan-nada-models", "zuckerberg.pt"),
# "cubism": ("rinong/stylegan-nada-models", "cubism.pt"),
# "disney_princess": ("rinong/stylegan-nada-models", "disney_princess.pt"),
# "edvard_munch": ("rinong/stylegan-nada-models", "edvard_munch.pt"),
# "van_gogh": ("rinong/stylegan-nada-models", "van_gogh.pt"),
# "oil": ("rinong/stylegan-nada-models", "oil.pt"),
# "rick_morty": ("rinong/stylegan-nada-models", "rick_morty.pt"),
# "botero": ("rinong/stylegan-nada-models", "botero.pt"),
# "crochet": ("rinong/stylegan-nada-models", "crochet.pt"),
# "modigliani": ("rinong/stylegan-nada-models", "modigliani.pt"),
# "shrek": ("rinong/stylegan-nada-models", "shrek.pt"),
# "sketch": ("rinong/stylegan-nada-models", "sketch.pt"),
# "thanos": ("rinong/stylegan-nada-models", "thanos.pt"),
}
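# Download every checkpoint from the Hugging Face Hub. hf_hub_download caches
# files locally and returns the cached path, so repeated launches are cheap.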
def get_models():
    model_paths = {}
    for model_name, (repo_id, filename) in model_repos.items():
        model_paths[model_name] = hf_hub_download(repo_id=repo_id, filename=filename)

    return model_paths
model_paths = get_models()
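# Holds everything the demo needs: one fine-tuned StyleGAN2 generator per
# style, the e4e encoder used for GAN inversion, and dlib's landmark predictor
# for face alignment.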
class ImageEditor:
def __init__(self):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
latent_size = 512
n_mlp = 8
channel_mult = 2
model_size = 1024
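        # One adapted generator per style; all share the FFHQ configuration
        # (1024px output, 512-d latents, 8-layer mapping network).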
self.generators = {}
self.model_list = [name for name in model_paths.keys() if name not in ["e4e", "dlib"]]
for model in self.model_list:
g_ema = Generator(
model_size, latent_size, n_mlp, channel_multiplier=channel_mult
).to(self.device)
checkpoint = torch.load(model_paths[model], map_location=self.device)
g_ema.load_state_dict(checkpoint['g_ema'])
self.generators[model] = g_ema
self.experiment_args = {"model_path": model_paths["e4e"]}
self.experiment_args["transform"] = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
]
)
self.resize_dims = (256, 256)
model_path = self.experiment_args["model_path"]
ckpt = torch.load(model_path, map_location="cpu")
opts = ckpt["opts"]
opts["checkpoint_path"] = model_path
opts = Namespace(**opts)
self.e4e_net = pSp(opts, self.device)
self.e4e_net.eval()
        self.shape_predictor = dlib.shape_predictor(model_paths["dlib"])
print("setup complete")
    def get_style_list(self):
        return list(self.generators.keys())
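    # End-to-end pipeline for one request: dlib alignment -> e4e inversion to
    # a latent code -> re-synthesis with each selected generator, returned
    # either as a single image grid or as an interpolation video.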
    def predict(
        self,
        input,           # input image path
        output_styles,   # list of style names to apply
        generate_video,  # if True, produce an interpolation video instead of an image
        with_editing,    # apply latent-space editing along the generated video
        video_format,    # "gif" displays in the browser, "mp4" is a higher-quality download
    ):
        styles = output_styles

        # Align and crop the face, then resize to the encoder's input resolution.
        input_image = self.run_alignment(str(input))
        input_image = input_image.resize(self.resize_dims)

        img_transforms = self.experiment_args["transform"]
        transformed_image = img_transforms(input_image)
with torch.no_grad():
images, latents = self.run_on_batch(transformed_image.unsqueeze(0))
result_image, latent = images[0], latents[0]
inverted_latent = latent.unsqueeze(0).unsqueeze(1)
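        # Add batch dimensions so the latent matches the layout the generators
        # and the video pipeline expect.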
out_dir = Path(tempfile.mkdtemp())
out_path = out_dir / "out.jpg"
generators = [self.generators[style] for style in styles]
if not generate_video:
with torch.no_grad():
img_list = []
for g_ema in generators:
img, _ = g_ema(inverted_latent, input_is_latent=True, truncation=1, randomize_noise=False)
img_list.append(img)
                out_img = torch.cat(img_list, dim=0)
utils.save_image(out_img, out_path, nrow=int(np.sqrt(out_img.size(0))), normalize=True, scale_each=True, range=(-1, 1))
return Path(out_path)
return self.generate_vid(generators, inverted_latent, out_dir, video_format, with_editing)
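    # Render interpolation frames between styles in a temp directory, stitch
    # them into an mp4, and optionally convert to a 256px gif. Without editing,
    # the frame count scales with the number of style transitions.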
def generate_vid(self, generators, latent, out_dir, video_format, with_editing):
np_latent = latent.squeeze(0).cpu().detach().numpy()
args = {
'fps': 24,
'target_latents': None,
'edit_directions': None,
'unedited_frames': 0 if with_editing else 40 * (len(generators) - 1)
}
args = Namespace(**args)
with tempfile.TemporaryDirectory() as dirpath:
generate_frames(args, np_latent, generators, dirpath)
video_from_interpolations(args.fps, dirpath)
gen_path = Path(dirpath) / "out.mp4"
out_path = out_dir / f"out.{video_format}"
if video_format == 'gif':
vid_to_gif(gen_path, out_dir, scale=256, fps=args.fps)
else:
shutil.copy2(gen_path, out_path)
return out_path
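    # FFHQ-style preprocessing: detect facial landmarks with dlib, then crop
    # and warp the face to match the generator's training distribution.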
def run_alignment(self, image_path):
aligned_image = align_face(filepath=image_path, predictor=self.shape_predictor)
print("Aligned image has shape: {}".format(aligned_image.size))
return aligned_image
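    # Invert a batch with the e4e encoder, returning both the reconstructions
    # and their latent codes.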
def run_on_batch(self, inputs):
images, latents = self.e4e_net(
inputs.to(self.device).float(), randomize_noise=False, return_latents=True
)
return images, latents
editor = ImageEditor()
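# The editor can also be called directly, outside Gradio. A minimal sketch,
# assuming a local face photo "photo.jpg" exists:
#
#     editor.predict("photo.jpg", ["anime"], False, False, "mp4")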
def group_visibility(visible):
    # Show or hide the video-options group when "Generate Video?" is toggled.
    return gr.Group.update(visible=visible)
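# Build the Gradio UI: inputs and controls on the left, outputs on the right.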
blocks = gr.Blocks()
with blocks:
gr.Markdown("<h1><center>StyleGAN-NADA</center></h1>")
gr.Markdown(
"Demo for StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators (SIGGRAPH 2022)."
)
gr.Markdown(
"For more information about the paper and code for training your own models (with examples OR text), see below."
)
with gr.Row():
with gr.Column():
input_img = gr.inputs.Image(type="filepath", label="Input image")
style_choice = gr.inputs.CheckboxGroup(choices=editor.get_style_list(), type="value", label="Choose your styles!")
video_choice = gr.inputs.Checkbox(default=False, label="Generate Video?", optional=False)
video_options_group = gr.Group()
with video_options_group:
edit_choice = gr.inputs.Checkbox(default=False, label="With Editing?", optional=False)
vid_format_choice = gr.inputs.Radio(choices=["gif", "mp4"], type="value", default='mp4', label="Video Format")
img_button = gr.Button("Edit Image")
vid_button = gr.Button("Generate Video")
with gr.Column():
img_output = gr.outputs.Image(type="file")
vid_output = gr.outputs.Video()
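    # Wire up events: the checkbox toggles the video-options group, and both
    # buttons route through the same predict function.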
video_choice.change(fn=group_visibility, inputs=video_choice, outputs=video_options_group)
img_button.click(fn=editor.predict, inputs=[input_img, style_choice, video_choice, edit_choice, vid_format_choice], outputs=img_output)
vid_button.click(fn=editor.predict, inputs=[input_img, style_choice, video_choice, edit_choice, vid_format_choice], outputs=vid_output)
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.00946' target='_blank'>StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators</a> | <a href='https://stylegan-nada.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/rinongal/StyleGAN-nada' target='_blank'>Code</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=rinong_sgnada' alt='visitor badge'></center>"
gr.Markdown(article)
blocks.launch(enable_queue=True)