import os
import random
import torch
import gradio as gr
from e4e.models.psp import pSp
from util import *
from huggingface_hub import hf_hub_download
import tempfile
from argparse import Namespace
import shutil
import dlib
import numpy as np
import torchvision.transforms as transforms
from torchvision import utils
from model.sg2_model import Generator
from generate_videos import generate_frames, video_from_interpolations, project_code_by_edit_name
from styleclip.styleclip_global import project_code_with_styleclip, style_tensor_to_style_dict
import clip
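
# Checkpoints are pulled from the Hugging Face Hub at startup: the e4e encoder,
# the dlib face-landmark predictor, the StyleCLIP global-direction tensor (fs3),
# the base FFHQ StyleGAN2 generator, and one fine-tuned StyleGAN-NADA generator
# per output style.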
model_dir = "models"
os.makedirs(model_dir, exist_ok=True)

model_repos = {
    "e4e": ("akhaliq/JoJoGAN_e4e_ffhq_encode", "e4e_ffhq_encode.pt"),
    "dlib": ("akhaliq/jojogan_dlib", "shape_predictor_68_face_landmarks.dat"),
    "sc_fs3": ("rinong/stylegan-nada-models", "fs3.npy"),
    "base": ("akhaliq/jojogan-stylegan2-ffhq-config-f", "stylegan2-ffhq-config-f.pt"),
    "anime": ("rinong/stylegan-nada-models", "anime.pt"),
    "joker": ("rinong/stylegan-nada-models", "joker.pt"),
    "simpson": ("rinong/stylegan-nada-models", "simpson.pt"),
    "ssj": ("rinong/stylegan-nada-models", "ssj.pt"),
    "white_walker": ("rinong/stylegan-nada-models", "white_walker.pt"),
    "zuckerberg": ("rinong/stylegan-nada-models", "zuckerberg.pt"),
    "cubism": ("rinong/stylegan-nada-models", "cubism.pt"),
    "disney_princess": ("rinong/stylegan-nada-models", "disney_princess.pt"),
    "edvard_munch": ("rinong/stylegan-nada-models", "edvard_munch.pt"),
    "van_gogh": ("rinong/stylegan-nada-models", "van_gogh.pt"),
    "oil": ("rinong/stylegan-nada-models", "oil.pt"),
    "rick_morty": ("rinong/stylegan-nada-models", "rick_morty.pt"),
    "botero": ("rinong/stylegan-nada-models", "botero.pt"),
    "crochet": ("rinong/stylegan-nada-models", "crochet.pt"),
    "modigliani": ("rinong/stylegan-nada-models", "modigliani.pt"),
    "shrek": ("rinong/stylegan-nada-models", "shrek.pt"),
    "sketch": ("rinong/stylegan-nada-models", "sketch.pt"),
    "thanos": ("rinong/stylegan-nada-models", "thanos.pt"),
    "ukyioe": ("rinong/stylegan-nada-models", "ukyioe.pt"),
    "witcher": ("rinong/stylegan-nada-models", "witcher.pt"),
    "marble": ("rinong/stylegan-nada-models", "marble.pt"),
    "ghibli": ("rinong/stylegan-nada-models", "ghibli.pt"),
    "grafitti_on_wall": ("rinong/stylegan-nada-models", "grafitti_on_wall.pt"),
}
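
# Resolve each (repo_id, filename) pair to a local file path. hf_hub_download
# stores files in the default Hub cache, so repeated launches reuse them.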
def get_models():
    os.makedirs(model_dir, exist_ok=True)
    model_paths = {}

    for model_name, repo_details in model_repos.items():
        download_path = hf_hub_download(repo_id=repo_details[0], filename=repo_details[1])
        model_paths[model_name] = download_path

    return model_paths

model_paths = get_models()
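
# ImageEditor wraps the full pipeline: e4e inversion of the input face,
# optional latent-space edits (InterFaceGAN or StyleCLIP), and synthesis
# through one fine-tuned generator per selected style.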
class ImageEditor(object):
    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Standard StyleGAN2 config-f hyperparameters for the 1024x1024 FFHQ generator.
        latent_size = 512
        n_mlp = 8
        channel_mult = 2
        model_size = 1024

        self.generators = {}
        self.model_list = [name for name in model_paths.keys() if name not in ["e4e", "dlib", "sc_fs3"]]

        for model in self.model_list:
            g_ema = Generator(
                model_size, latent_size, n_mlp, channel_multiplier=channel_mult
            ).to(self.device)

            checkpoint = torch.load(model_paths[model], map_location=self.device)
            g_ema.load_state_dict(checkpoint["g_ema"])

            self.generators[model] = g_ema

        self.experiment_args = {"model_path": model_paths["e4e"]}
        self.experiment_args["transform"] = transforms.Compose(
            [
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )
        self.resize_dims = (256, 256)

        model_path = self.experiment_args["model_path"]
        ckpt = torch.load(model_path, map_location="cpu")
        opts = ckpt["opts"]
        opts["checkpoint_path"] = model_path
        opts = Namespace(**opts)

        self.e4e_net = pSp(opts, self.device)
        self.e4e_net.eval()

        self.shape_predictor = dlib.shape_predictor(
            model_paths["dlib"]
        )

        self.styleclip_fs3 = torch.from_numpy(np.load(model_paths["sc_fs3"])).to(self.device)
        self.clip_model, _ = clip.load("ViT-B/32", device=self.device)

        print("setup complete")
    def get_style_list(self):
        return list(self.generators.keys())
    def invert_image(self, input_image):
        # Align and crop the face with dlib, then resize to the encoder's input size.
        input_image = self.run_alignment(str(input_image))
        input_image = input_image.resize(self.resize_dims)

        img_transforms = self.experiment_args["transform"]
        transformed_image = img_transforms(input_image)

        # Invert into latent space with the e4e encoder.
        with torch.no_grad():
            images, latents = self.run_on_batch(transformed_image.unsqueeze(0))
            result_image, latent = images[0], latents[0]

        inverted_latent = latent.unsqueeze(0).unsqueeze(1)

        return inverted_latent
    def get_generators_for_styles(self, output_styles, loop_styles=False):
        if "base" in output_styles:  # always start with the base model if it was chosen
            output_styles.insert(0, output_styles.pop(output_styles.index("base")))
        if loop_styles:
            output_styles.append(output_styles[0])

        return [self.generators[style] for style in output_styles]
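
    # Decorator that gathers the individual Gradio controls (edit type, the five
    # InterFaceGAN sliders, and the four StyleCLIP fields) into a single
    # edit_choices dict before forwarding the remaining inputs to the wrapped
    # method. This keeps edit_image / edit_video signatures small while the UI
    # passes ten separate components ahead of the image and style selections.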
    def _pack_edits(func):
        def inner(self,
                  edit_type_choice,
                  pose_slider,
                  smile_slider,
                  gender_slider,
                  age_slider,
                  hair_slider,
                  src_text_styleclip,
                  tar_text_styleclip,
                  alpha_styleclip,
                  beta_styleclip,
                  *args):
            edit_choices = {"edit_type": edit_type_choice,
                            "pose": pose_slider,
                            "smile": smile_slider,
                            "gender": gender_slider,
                            "age": age_slider,
                            "hair_length": hair_slider,
                            "src_text": src_text_styleclip,
                            "tar_text": tar_text_styleclip,
                            "alpha": alpha_styleclip,
                            "beta": beta_styleclip}

            return func(self, *args, edit_choices)

        return inner
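
    # Two editing backends: InterFaceGAN shifts the latent code along precomputed
    # attribute directions (one target latent per nonzero slider), while StyleCLIP
    # global directions edit the S-space code using CLIP-derived text directions.
    # If neither produces an edit, the unedited source latent is reused for every
    # style transition.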
    def get_target_latents(self, source_latent, edit_choices, generators):
        target_latents = []

        if edit_choices["edit_type"] == "InterFaceGAN":
            np_source_latent = source_latent.squeeze(0).cpu().detach().numpy()

            for attribute_name in ["pose", "smile", "gender", "age", "hair_length"]:
                strength = edit_choices[attribute_name]
                if strength != 0.0:
                    projected_code_np = project_code_by_edit_name(np_source_latent, attribute_name, strength)
                    target_latents.append(torch.from_numpy(projected_code_np).float().to(self.device))

        elif edit_choices["edit_type"] == "StyleCLIP":
            if edit_choices["alpha"] != 0.0:
                source_s_dict = generators[0].get_s_code(source_latent, input_is_latent=True)[0]
                target_latents.append(project_code_with_styleclip(source_s_dict,
                                                                  edit_choices["src_text"],
                                                                  edit_choices["tar_text"],
                                                                  edit_choices["alpha"],
                                                                  edit_choices["beta"],
                                                                  generators[0],
                                                                  self.styleclip_fs3,
                                                                  self.clip_model))

        # if the edit type is "None" or all sliders were left at 0
        if not target_latents:
            target_latents = [source_latent.squeeze(0), ] * max((len(generators) - 1), 1)

        return target_latents
    @_pack_edits
    def edit_image(self, input, output_styles, edit_choices):
        return self.predict(input, output_styles, edit_choices=edit_choices)

    @_pack_edits
    def edit_video(self, input, output_styles, loop_styles, edit_choices):
        return self.predict(input, output_styles, generate_video=True, loop_styles=loop_styles, edit_choices=edit_choices)
    def predict(
        self,
        input,  # Input image path
        output_styles,  # Style checkbox options
        generate_video=False,  # Generate a video instead of output images
        loop_styles=False,  # Loop the video back to the initial style
        edit_choices=None,  # Optional dictionary with edit choice arguments
    ):
        if edit_choices is None:
            edit_choices = {"edit_type": "None"}

        out_dir = tempfile.mkdtemp()

        inverted_latent = self.invert_image(input)
        generators = self.get_generators_for_styles(output_styles, loop_styles)
        target_latents = self.get_target_latents(inverted_latent, edit_choices, generators)

        if not generate_video:
            output_paths = []

            with torch.no_grad():
                for g_ema in generators:
                    latent_for_gen = random.choice(target_latents)

                    if edit_choices["edit_type"] == "StyleCLIP":
                        latent_for_gen = style_tensor_to_style_dict(latent_for_gen, g_ema)
                        img, _ = g_ema(latent_for_gen, input_is_s_code=True, input_is_latent=True, truncation=1, randomize_noise=False)
                    else:
                        img, _ = g_ema([latent_for_gen], input_is_latent=True, truncation=1, randomize_noise=False)

                    output_path = os.path.join(out_dir, f"out_{len(output_paths)}.jpg")
                    # note: newer torchvision versions rename the `range` argument to `value_range`
                    utils.save_image(img, output_path, nrow=1, normalize=True, range=(-1, 1))

                    output_paths.append(output_path)

            return output_paths

        return self.generate_vid(generators, inverted_latent, target_latents, out_dir)
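
    # Render interpolation frames between consecutive target latents into a
    # temporary directory, assemble them into out.mp4 at a fixed frame rate,
    # and copy the result out before the temporary directory is deleted.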
    def generate_vid(self, generators, source_latent, target_latents, out_dir):
        fps = 24

        with tempfile.TemporaryDirectory() as dirpath:
            generate_frames(source_latent, target_latents, generators, dirpath)
            video_from_interpolations(fps, dirpath)

            gen_path = os.path.join(dirpath, "out.mp4")
            out_path = os.path.join(out_dir, "out.mp4")

            shutil.copy2(gen_path, out_path)

        return out_path
    def run_alignment(self, image_path):
        aligned_image = align_face(filepath=image_path, predictor=self.shape_predictor)
        print("Aligned image has shape: {}".format(aligned_image.size))

        return aligned_image
    def run_on_batch(self, inputs):
        images, latents = self.e4e_net(
            inputs.to(self.device).float(), randomize_noise=False, return_latents=True
        )
        return images, latents
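
# Build the Gradio interface: style selection and editing controls at the top,
# with separate tabs for static image generation and style-interpolation videos.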
editor = ImageEditor()

blocks = gr.Blocks()
with blocks:
    gr.Markdown("<h1><center>StyleGAN-NADA</center></h1>")
    gr.Markdown(
        "<h4 style='font-size: 110%;margin-top:.5em'>Inference demo for StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators (SIGGRAPH 2022).</h4>"
    )
    gr.Markdown(
        "<h4 style='font-size: 110%;margin-top:.5em'>Usage</h4><div>Upload an image of your face, pick your desired output styles, and apply StyleGAN-based editing.</div>"
        "<div>Choose the Edit Images tab to create static images in all chosen styles. Choose the Create Video tab to interpolate between all chosen styles (but take it easy on the servers! We've limited video length, so if you add too many styles, they'll pass in the blink of an eye! 🤗)</div>"
    )
    gr.Markdown(
        "For more information about the paper and code for training your own models (with examples OR text), please visit our <a href='https://stylegan-nada.github.io/' target='_blank'>Project Page</a> or the <a href='https://github.com/rinongal/StyleGAN-nada' target='_blank'>official repository</a>."
    )

    gr.Markdown("<h4 style='font-size: 110%;margin-top:.5em'>A note on social impact</h4><div>This model relies on StyleGAN and CLIP, both of which are prone to biases inherited from their training data and their architecture. These may include (but are not limited to) poor representation of minorities or the perpetuation of societal biases, such as gender norms. Moreover, generative models can, and have been, used to create deep-fake imagery which may assist in the spread of propaganda. However, <a href='https://github.com/NVlabs/stylegan3-detector' target='_blank'>tools are available</a> for identifying StyleGAN-generated imagery, and any 'realistic' results produced by this model should be easily identifiable through such tools.</div>")
    with gr.Row():
        input_img = gr.inputs.Image(type="filepath", label="Input image")

        with gr.Column():
            style_choice = gr.inputs.CheckboxGroup(choices=editor.get_style_list(), type="value", label="Choose your styles!")

            editing_type_choice = gr.Radio(choices=["None", "InterFaceGAN", "StyleCLIP"], label="Choose a latent-space editing option. For InterFaceGAN and StyleCLIP, set the options below:")

    with gr.Tabs():
        with gr.TabItem("InterFaceGAN Editing Options"):
            gr.Markdown("Move the sliders to make the chosen attribute stronger (e.g. the person older) or leave at 0 to disable editing.")
            gr.Markdown("If multiple options are provided, they will be used randomly between images (or sequentially for a video), <u>not</u> together.")
            gr.Markdown("Please note that some directions may be entangled. For example, hair length adjustments are likely to also modify the perceived gender.")
            gr.Markdown("For more information about InterFaceGAN, please visit <a href='https://github.com/genforce/interfacegan' target='_blank'>the official repository</a>.")

            pose_slider = gr.Slider(label="Pose", minimum=-1, maximum=1, value=0, step=0.05)
            smile_slider = gr.Slider(label="Smile", minimum=-1, maximum=1, value=0, step=0.05)
            gender_slider = gr.Slider(label="Perceived Gender", minimum=-1, maximum=1, value=0, step=0.05)
            age_slider = gr.Slider(label="Age", minimum=-1, maximum=1, value=0, step=0.05)
            hair_slider = gr.Slider(label="Hair Length", minimum=-1, maximum=1, value=0, step=0.05)

            ig_edit_choices = [pose_slider, smile_slider, gender_slider, age_slider, hair_slider]

        with gr.TabItem("StyleCLIP Editing Options"):
            gr.Markdown("Choose source and target descriptors, such as 'face with hair' to 'face with curly hair'.")
            gr.Markdown("Editing strength controls the magnitude of change. The disentanglement threshold limits the number of channels the network can modify, reducing possible leakage into other attributes. Setting the threshold too high may leave no available channels; if you see an error, lower the threshold and try again.")
            gr.Markdown("For more information about StyleCLIP, please visit <a href='https://github.com/orpatashnik/StyleCLIP' target='_blank'>the official repository</a>.")

            src_text_styleclip = gr.Textbox(label="Source text")
            tar_text_styleclip = gr.Textbox(label="Target text")

            alpha_styleclip = gr.Slider(label="Edit strength", minimum=-10, maximum=10, value=0, step=0.1)
            beta_styleclip = gr.Slider(label="Disentanglement Threshold", minimum=0.08, maximum=0.3, value=0.14, step=0.01)

            sc_edit_choices = [src_text_styleclip, tar_text_styleclip, alpha_styleclip, beta_styleclip]
    with gr.Tabs():
        with gr.TabItem("Edit Images"):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        img_button = gr.Button("Edit Image")
                with gr.Column():
                    img_output = gr.Gallery(label="Output Images")

        with gr.TabItem("Create Video"):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        vid_button = gr.Button("Generate Video")
                        loop_styles = gr.inputs.Checkbox(default=True, label="Loop video back to the initial style?")
                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("Warning: Video generation requires the synthesis of hundreds of frames and is expected to take several minutes.")
                            gr.Markdown("To reduce queue times, we significantly reduced the number of video frames. Using more than 3 styles will further reduce the frames per style, leading to quicker transitions. For better control, we recommend cloning the gradio app, adjusting <b>num_alphas</b> in <b>generate_videos.py</b>, and running the code locally.")
                with gr.Column():
                    vid_output = gr.outputs.Video(label="Output Video")

    edit_inputs = [editing_type_choice] + ig_edit_choices + sc_edit_choices

    # The editing controls come first so that _pack_edits can pop them off
    # before the image and style selections reach edit_image / edit_video.
    img_button.click(fn=editor.edit_image, inputs=edit_inputs + [input_img, style_choice], outputs=img_output)
    vid_button.click(fn=editor.edit_video, inputs=edit_inputs + [input_img, style_choice, loop_styles], outputs=vid_output)
    article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.00946' target='_blank'>StyleGAN-NADA: CLIP-Guided Domain Adaptation of Image Generators</a> | <a href='https://stylegan-nada.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/rinongal/StyleGAN-nada' target='_blank'>Code</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=rinong_sgnada' alt='visitor badge'></center>"
    gr.Markdown(article)

blocks.launch(enable_queue=True)