|
|
|
|
|
# Install dependencies (gradio is required for the demo UI at the end of this script)
!pip install -q --upgrade transformers==4.25.1 diffusers ftfy gradio

!pip install accelerate -q
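# (In Colab, the runtime may need to be restarted after these installs/upgrades.)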
|
|
|
from base64 import b64encode |
|
|
|
import numpy |
|
import torch |
|
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel |
|
from huggingface_hub import notebook_login |
|
|
|
|
|
from IPython.display import HTML |
|
from matplotlib import pyplot as plt |
|
from pathlib import Path |
|
from PIL import Image |
|
from torch import autocast |
|
from torchvision import transforms as tfms |
|
from tqdm.auto import tqdm |
|
from transformers import CLIPTextModel, CLIPTokenizer, logging

import gradio as gr  # needed for the demo interface at the end of the script
|
|
|
torch.manual_seed(1) |
|
|
|
|
|
|
|
# Suppress verbose warnings from transformers
logging.set_verbosity_error()
|
|
|
|
|
torch_device = "cuda" if torch.cuda.is_available() else "cpu" |
|
|
|
import os

# Hugging Face token read from the environment (set HF_TOKEN_SD beforehand)
MY_TOKEN = os.environ.get('HF_TOKEN_SD')
|
|
|
|
|
|
|
# VAE: encodes images into latents and decodes latents back into images
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_auth_token=MY_TOKEN)
|
|
|
|
|
# CLIP tokenizer and text encoder used to condition the UNet on the prompt
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
|
|
|
|
|
# UNet: predicts the noise residual of the latents at each timestep
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
|
|
|
|
|
# K-LMS scheduler configured with the Stable Diffusion v1 training noise schedule
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
|
|
|
|
|
# Move the models to the chosen device
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)
|
|
|
"""Functions""" |
|
|
|
def pil_to_latent(input_im):
    # Encode a single PIL image into a scaled latent (batch of size 1)
    with torch.no_grad():
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # map [0, 1] -> [-1, 1]
    return 0.18215 * latent.latent_dist.sample()  # SD v1 latent scaling factor
|
|
|
def latents_to_pil(latents):
    # Decode a batch of latents into a list of PIL images
    latents = (1 / 0.18215) * latents  # undo the SD v1 latent scaling factor
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)  # map [-1, 1] -> [0, 1]
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
|
|
|
|
|
def get_output_embeds(input_embeddings):
    # Run custom (token + position) embeddings through the CLIP text encoder,
    # bypassing its embedding layer, and return the final hidden states.
    bsz, seq_len = input_embeddings.shape[:2]
    # _build_causal_attention_mask is a private method of the pinned transformers version (4.25.1)
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)

    # Forward pass through the transformer encoder
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,
        return_dict=None,
    )

    # Last hidden state, followed by the final layer norm
    output = encoder_outputs[0]
    output = text_encoder.text_model.final_layer_norm(output)
    return output
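# For this CLIP text encoder (ViT-L/14) the returned hidden states have shape
# (batch, 77, 768), which is what the UNet's cross-attention layers expect.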
|
|
|
|
|
|
|
def generate_with_embs(text_embeddings, text_input):
    # Sample an image from precomputed text embeddings using classifier-free guidance
    height = 512
    width = 512
    num_inference_steps = 10
    guidance_scale = 7.5
    generator = torch.manual_seed(64)
    batch_size = 1

    # Unconditional (empty prompt) embeddings for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prepare the scheduler and the initial noise latents
    scheduler.set_timesteps(num_inference_steps)
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop
    for i, t in tqdm(enumerate(scheduler.timesteps)):
        # Run the unconditional and conditional passes in a single batch
        latent_model_input = torch.cat([latents] * 2)
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Step to the previous (less noisy) latents
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    return latents_to_pil(latents)[0]
|
|
|
def ref_loss(images, ref_image):
    # Mean absolute error between the denoised images and the reference image
    error = torch.abs(images - ref_image).mean()
    return error
|
|
|
def inference(prompt, style_index):
    # Generate three images: the plain prompt, the prompt with a learned textual-inversion
    # style embedding injected, and a variant guided towards a reference ('rainbow') image.
    styles = ['<midjourney-style>', '<hitokomoru-style>', '<birb-style>', '<summie-style>', '<illustration-style>', '<m-geo>', '<buhu>']
    embed = ['learned_embeds_m.bin', 'learned_embeds_h.bin', 'learned_embeds.bin', 'learned_embeds_s.bin', 'learned_embeds_i.bin', 'learned_embeds_mg.bin', 'learned_embeds_buhu']

    # Append " ." to the prompt; the trailing '.' acts as the placeholder token that is
    # replaced with the learned style embedding further below
    text_input = tokenizer(prompt + " .", padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
|
|
|
    # Embedding layers of the CLIP text encoder
    token_emb_layer = text_encoder.text_model.embeddings.token_embedding
    pos_emb_layer = text_encoder.text_model.embeddings.position_embedding

    # Position embeddings for the full 77-token sequence
    position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
    position_embeddings = pos_emb_layer(position_ids)

    # Token embeddings for the prompt
    input_ids = text_input.input_ids.to(torch_device)
    token_embeddings = token_emb_layer(input_ids)

    # Combine token and position embeddings, then run them through the text encoder
    input_embeddings = token_embeddings + position_embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # Image 1: the unmodified prompt
    image1 = generate_with_embs(modified_output_embeddings, text_input)
|
|
|
    # 269 is the id of the word-final '.' token in the CLIP tokenizer, i.e. the
    # placeholder appended to the prompt above
    replace_id = 269

    # Load the learned textual-inversion embedding for the selected style
    # (the embedding files are expected to be present in the working directory)
    style = styles[style_index]
    emb = embed[style_index]
    x_embed = torch.load(emb)
    replacement_token_embedding = x_embed[style].to(torch_device)

    # Swap the placeholder token's embedding for the learned style embedding
    token_embeddings[0, torch.where(input_ids[0] == replace_id)] = replacement_token_embedding.to(torch_device)

    # Rebuild the input embeddings and re-encode them
    input_embeddings = token_embeddings + position_embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)

    # Image 2: the prompt with the learned style embedding injected
    image2 = generate_with_embs(modified_output_embeddings, text_input)
|
|
|
    # Build a reference image from the prompt 'rainbow'; its latent is used below to
    # guide the generation towards the reference's colours
    prompt1 = 'rainbow'
    text_input1 = tokenizer(prompt1, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")

    input_ids1 = text_input1.input_ids.to(torch_device)
    token_embeddings1 = token_emb_layer(input_ids1)
    position_embeddings1 = pos_emb_layer(position_ids)

    input_embeddings1 = token_embeddings1 + position_embeddings1
    modified_output_embeddings1 = get_output_embeds(input_embeddings1)

    ref_image = generate_with_embs(modified_output_embeddings1, text_input1)
    ref_latent = pil_to_latent(ref_image)
|
|
|
    # Settings for the guided generation (image 3)
    height = 512
    width = 512
    num_inference_steps = 10
    guidance_scale = 8
    generator = torch.manual_seed(64)
    batch_size = 1
    ref_loss_scale = 200  # weight of the reference-image loss used for guidance

    # Text embeddings for the prompt
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # Unconditional embeddings for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
|
|
|
|
    # Prepare the scheduler and the initial noise latents
    scheduler.set_timesteps(num_inference_steps)
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma
|
|
|
|
|
    # Denoising loop with reference-image guidance
    for i, t in tqdm(enumerate(scheduler.timesteps)):
        # Run the unconditional and conditional passes in a single batch
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Every 5th step, nudge the latents towards the reference image
        if i % 5 == 0:
            latents = latents.detach().requires_grad_()

            # Predicted fully-denoised latents at this step
            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample

            # Decode to image space and compare against the decoded reference latent
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5
            with torch.no_grad():
                ref_images = vae.decode((1 / 0.18215) * ref_latent).sample / 2 + 0.5

            loss = ref_loss(denoised_images, ref_images) * ref_loss_scale

            # Gradient of the loss w.r.t. the latents, used to steer the generation
            cond_grad = torch.autograd.grad(loss, latents)[0]
            latents = latents.detach() - cond_grad * sigma**2

            # Roll back the scheduler's internal step counter, since scheduler.step()
            # was called above only to obtain pred_original_sample
            scheduler._step_index = scheduler._step_index - 1

        # Step to the previous (less noisy) latents
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    # Image 3: the guided result
    image3 = latents_to_pil(latents)[0]
|
|
|
    # Return (image, caption) pairs for the Gradio gallery
    return (image1, 'Original Image'), (image2, 'Styled Image'), (image3, 'After Textual Inversion')
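# Note: inference() can also be called directly (without the Gradio UI); it returns
# three (PIL.Image, caption) pairs, e.g. inference('a girl playing in snow', 0).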
|
|
|
|
|
|
|
title="Textual Inversion in Stable Diffusion" |
|
description="<p style='text-align: center;'>Textual Inversion in Stable Diffusion.</b></p>" |
|
gallery = gr.Gallery(label="Generated images", show_label=True, elem_id="gallery", columns=3).style(grid=[2], height="auto") |
|
|
|
gr.Interface(fn=inference, inputs=["text", |
|
|
|
gr.Radio([('<midjourney-style>',0), ('<hitokomoru-style>',1),('<birb-style>',2), |
|
('<summie-style>',3),('<illustration-style>',4),('<m-geo>',5),('<buhu>',6)] , value = 0, label = 'Please select your choice of Style')], |
|
outputs = gallery, title = title, |
|
examples = [['a girl playing in snow',0], |
|
['an oil painting of a goddess',6], |
|
['a rabbit on the moon', 5 ]], ).launch(debug=True) |
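# launch(debug=True) blocks the cell and surfaces errors in the notebook output
# while the demo is running.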
|
|
|
|