Spaces:
Runtime error
Runtime error
#!pip install -q --upgrade transformers diffusers ftfy | |
#!pip install -q --upgrade transformers==4.25.1 diffusers ftfy | |
#!pip install accelerate -q | |
from base64 import b64encode | |
import numpy | |
import torch | |
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel | |
from huggingface_hub import notebook_login | |
# For video display: | |
from IPython.display import HTML | |
from matplotlib import pyplot as plt | |
from pathlib import Path | |
from PIL import Image | |
from torch import autocast | |
from torchvision import transforms as tfms | |
from tqdm.auto import tqdm | |
from transformers import CLIPTextModel, CLIPTokenizer, logging | |
import gradio as gr | |
torch.manual_seed(1) | |
#if not (Path.home()/'.huggingface'/'token').exists(): notebook_login() | |
# Supress some unnecessary warnings when loading the CLIPTextModel | |
logging.set_verbosity_error() | |
# Set device | |
torch_device = "cuda" if torch.cuda.is_available() else "cpu" | |
import os | |
MY_TOKEN=os.environ.get('HF_TOKEN_SD') | |
# Load the autoencoder model which will be used to decode the latents into image space. | |
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae",use_auth_token=MY_TOKEN) | |
# Load the tokenizer and text encoder to tokenize and encode the text. | |
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") | |
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14") | |
# The UNet model for generating the latents. | |
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet") | |
# The noise scheduler | |
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000) | |
# To the GPU we go! | |
vae = vae.to(torch_device) | |
text_encoder = text_encoder.to(torch_device) | |
unet = unet.to(torch_device) | |
"""Functions""" | |
def pil_to_latent(input_im): | |
# Single image -> single latent in a batch (so size 1, 4, 64, 64) | |
with torch.no_grad(): | |
latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device)*2-1) # Note scaling | |
return 0.18215 * latent.latent_dist.sample() | |
def latents_to_pil(latents): | |
# bath of latents -> list of images | |
latents = (1 / 0.18215) * latents | |
with torch.no_grad(): | |
image = vae.decode(latents).sample | |
image = (image / 2 + 0.5).clamp(0, 1) | |
image = image.detach().cpu().permute(0, 2, 3, 1).numpy() | |
images = (image * 255).round().astype("uint8") | |
pil_images = [Image.fromarray(image) for image in images] | |
return pil_images | |
def get_output_embeds(input_embeddings): | |
# CLIP's text model uses causal mask, so we prepare it here: | |
bsz, seq_len = input_embeddings.shape[:2] | |
causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype) | |
# Getting the output embeddings involves calling the model with passing output_hidden_states=True | |
# so that it doesn't just return the pooled final predictions: | |
encoder_outputs = text_encoder.text_model.encoder( | |
inputs_embeds=input_embeddings, | |
attention_mask=None, # We aren't using an attention mask so that can be None | |
causal_attention_mask=causal_attention_mask.to(torch_device), | |
output_attentions=None, | |
output_hidden_states=True, # We want the output embs not the final output | |
return_dict=None, | |
) | |
# We're interested in the output hidden state only | |
output = encoder_outputs[0] | |
# There is a final layer norm we need to pass these through | |
output = text_encoder.text_model.final_layer_norm(output) | |
# And now they're ready! | |
return output | |
#Generating an image with these modified embeddings | |
def generate_with_embs(text_embeddings, text_input): | |
height = 512 # default height of Stable Diffusion | |
width = 512 # default width of Stable Diffusion | |
num_inference_steps = 10 # Number of denoising steps | |
guidance_scale = 7.5 # Scale for classifier-free guidance | |
generator = torch.manual_seed(64) # Seed generator to create the inital latent noise | |
batch_size = 1 | |
max_length = text_input.input_ids.shape[-1] | |
uncond_input = tokenizer( | |
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" | |
) | |
with torch.no_grad(): | |
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] | |
text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) | |
# Prep Scheduler | |
scheduler.set_timesteps(num_inference_steps) | |
# Prep latents | |
latents = torch.randn( | |
(batch_size, unet.config.in_channels, height // 8, width // 8), | |
generator=generator, | |
) | |
latents = latents.to(torch_device) | |
latents = latents * scheduler.init_noise_sigma | |
# Loop | |
for i, t in tqdm(enumerate(scheduler.timesteps)): | |
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. | |
latent_model_input = torch.cat([latents] * 2) | |
sigma = scheduler.sigmas[i] | |
latent_model_input = scheduler.scale_model_input(latent_model_input, t) | |
# predict the noise residual | |
with torch.no_grad(): | |
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"] | |
# perform guidance | |
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) | |
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) | |
# compute the previous noisy sample x_t -> x_t-1 | |
latents = scheduler.step(noise_pred, t, latents).prev_sample | |
return latents_to_pil(latents)[0] | |
def ref_loss(images,ref_image): | |
# Reference image | |
error = torch.abs(images - ref_image).mean() | |
return error | |
def inference(prompt, style_index): | |
styles = ['<midjourney-style>', '<hitokomoru-style>','<birb-style>','<summie-style>','<illustration-style>','<m-geo>','<buhu>'] | |
embed = ['learned_embeds_m.bin','learned_embeds_h.bin', 'learned_embeds.bin', 'learned_embeds_s.bin','learned_embeds_i.bin','learned_embeds_mg.bin','learned_embeds_buhu.bin'] | |
# Tokenize | |
text_input = tokenizer(prompt+" .", padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") | |
# Access the embedding layer | |
token_emb_layer = text_encoder.text_model.embeddings.token_embedding | |
token_embeddings = token_emb_layer(text_input.input_ids.to(torch_device)) | |
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding | |
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77] | |
position_embeddings = pos_emb_layer(position_ids) | |
## Without any Textual Inversion | |
input_ids = text_input.input_ids.to(torch_device) | |
# Get token embeddings | |
token_embeddings = token_emb_layer(input_ids) | |
# Combine with pos embs | |
input_embeddings = token_embeddings + position_embeddings | |
# Feed through to get final output embs | |
modified_output_embeddings = get_output_embeds(input_embeddings) | |
# And generate an image with this: | |
image1 = generate_with_embs(modified_output_embeddings,text_input) | |
replace_id=269 #replaced dot with Textual Inversion | |
## midjourney-style | |
style = styles[style_index] | |
emb = embed[style_index] | |
x_embed = torch.load(emb) | |
# The new embedding - our special birb word | |
replacement_token_embedding = x_embed[style].to(torch_device) | |
# Insert this into the token embeddings | |
token_embeddings[0, torch.where(input_ids[0]==replace_id)] = replacement_token_embedding.to(torch_device) | |
# Combine with pos embs | |
input_embeddings = token_embeddings + position_embeddings | |
# Feed through to get final output embs | |
modified_output_embeddings = get_output_embeds(input_embeddings) | |
# And generate an image with this: | |
image2 = generate_with_embs(modified_output_embeddings,text_input) | |
prompt1 = 'rainbow' | |
# Tokenize | |
text_input1 = tokenizer(prompt1, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") | |
# Access the embedding layer | |
token_emb_layer = text_encoder.text_model.embeddings.token_embedding | |
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding | |
position_ids = text_encoder.text_model.embeddings.position_ids[:, :77] | |
position_embeddings1 = pos_emb_layer(position_ids) | |
input_ids1 = text_input1.input_ids.to(torch_device) | |
# Get token embeddings | |
token_embeddings1 = token_emb_layer(input_ids1) | |
# Combine with pos embs | |
input_embeddings1 = token_embeddings1 + position_embeddings1 | |
# Feed through to get final output embs | |
modified_output_embeddings1 = get_output_embeds(input_embeddings1) | |
# And generate an image with this: | |
ref_image = generate_with_embs(modified_output_embeddings1, text_input1) | |
ref_latent = pil_to_latent(ref_image) | |
height = 512 # default height of Stable Diffusion | |
width = 512 # default width of Stable Diffusion | |
num_inference_steps = 10 # # Number of denoising steps | |
guidance_scale = 8 # # Scale for classifier-free guidance | |
generator = torch.manual_seed(64) # Seed generator to create the inital latent noise | |
batch_size = 1 | |
blue_loss_scale = 200 # | |
# Prep text | |
text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt") | |
with torch.no_grad(): | |
text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] | |
# And the uncond. input as before: | |
max_length = text_input.input_ids.shape[-1] | |
uncond_input = tokenizer( | |
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" | |
) | |
with torch.no_grad(): | |
uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] | |
text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) | |
# Prep Scheduler | |
scheduler.set_timesteps(num_inference_steps) | |
# Prep latents | |
latents = torch.randn( | |
(batch_size, unet.config.in_channels, height // 8, width // 8), | |
generator=generator, | |
) | |
latents = latents.to(torch_device) | |
latents = latents * scheduler.init_noise_sigma | |
# Loop | |
for i, t in tqdm(enumerate(scheduler.timesteps)): | |
# expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. | |
latent_model_input = torch.cat([latents] * 2) | |
sigma = scheduler.sigmas[i] | |
latent_model_input = scheduler.scale_model_input(latent_model_input, t) | |
# predict the noise residual | |
with torch.no_grad(): | |
noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"] | |
# perform CFG | |
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) | |
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) | |
#### ADDITIONAL GUIDANCE ### | |
if i%5 == 0: | |
# Requires grad on the latents | |
latents = latents.detach().requires_grad_() | |
# Get the predicted x0: | |
# latents_x0 = latents - sigma * noise_pred | |
latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample | |
# Decode to image space | |
denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5 # range (0, 1) | |
#ref image | |
with torch.no_grad(): | |
ref_images = vae.decode((1 / 0.18215) * ref_latent).sample / 2 + 0.5 # range (0, 1) | |
# Calculate loss | |
loss = ref_loss(denoised_images,ref_images) * blue_loss_scale | |
# Occasionally print it out | |
# if i%10==0: | |
# print(i, 'loss:', loss.item()) | |
# Get gradient | |
cond_grad = torch.autograd.grad(loss, latents)[0] | |
# Modify the latents based on this gradient | |
latents = latents.detach() - cond_grad * sigma**2 | |
scheduler._step_index = scheduler._step_index - 1 | |
# Now step with scheduler | |
latents = scheduler.step(noise_pred, t, latents).prev_sample | |
#latents = scheduler.step(noise_pred, t, latents).pred_original_sample | |
image3 = latents_to_pil(latents)[0] | |
return (image1, 'Original Image'), (image2, 'Styled Image'), (image3, 'After Textual Inversion') | |
# Gradio App with num_inference_steps=10 | |
title="Textual Inversion in Stable Diffusion" | |
description="<p style='text-align: center;'>Textual Inversion in Stable Diffusion.</b></p>" | |
gallery = gr.Gallery(label="Generated images", show_label=True, elem_id="gallery", columns=3).style(grid=[2], height="auto") | |
gr.Interface(fn=inference, inputs=["text", | |
gr.Radio([('<midjourney-style>',0), ('<hitokomoru-style>',1),('<birb-style>',2), | |
('<summie-style>',3),('<illustration-style>',4),('<m-geo>',5),('<buhu>',6)] , value = 0, label = 'Style')], | |
outputs = gallery, title = title, | |
examples = [['a girl playing in snow',0], | |
#['an oil painting of a goddess',6], | |
#['a rabbit on the moon', 5 ] | |
], | |
).launch(debug=True) | |