import gradio as gr
import spaces
import numpy as np
import torch
import requests
import random
import os
import sys
import pickle
from PIL import Image
from tqdm.auto import tqdm
from datetime import datetime
import torch.nn as nn
import torch.nn.functional as F
class AttnProcessor(nn.Module):
r"""
Default processor for performing attention-related computations.
"""
def __init__(
self,
hidden_size=None,
cross_attention_dim=None,
):
super().__init__()
def __call__(
self,
attn,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
temb=None,
):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
if attn.group_norm is not None:
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
query = attn.head_to_batch_dim(query)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(query, key, attention_mask)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
import diffusers
from diffusers import StableDiffusionXLPipeline
from diffusers import DDIMScheduler
import torch.nn.functional as F
from transformers.models.clip.configuration_clip import CLIPVisionConfig
def cal_attn_mask(total_length,id_length,sa16,sa32,sa64,device="cuda",dtype= torch.float16):
bool_matrix256 = torch.rand((1, total_length * 256),device = device,dtype = dtype) < sa16
bool_matrix1024 = torch.rand((1, total_length * 1024),device = device,dtype = dtype) < sa32
bool_matrix4096 = torch.rand((1, total_length * 4096),device = device,dtype = dtype) < sa64
bool_matrix256 = bool_matrix256.repeat(total_length,1)
bool_matrix1024 = bool_matrix1024.repeat(total_length,1)
bool_matrix4096 = bool_matrix4096.repeat(total_length,1)
for i in range(total_length):
bool_matrix256[i:i+1,id_length*256:] = False
bool_matrix1024[i:i+1,id_length*1024:] = False
bool_matrix4096[i:i+1,id_length*4096:] = False
bool_matrix256[i:i+1,i*256:(i+1)*256] = True
bool_matrix1024[i:i+1,i*1024:(i+1)*1024] = True
bool_matrix4096[i:i+1,i*4096:(i+1)*4096] = True
mask256 = bool_matrix256.unsqueeze(1).repeat(1,256,1).reshape(-1,total_length * 256)
mask1024 = bool_matrix1024.unsqueeze(1).repeat(1,1024,1).reshape(-1,total_length * 1024)
mask4096 = bool_matrix4096.unsqueeze(1).repeat(1,4096,1).reshape(-1,total_length * 4096)
return mask256,mask1024,mask4096
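# Illustrative sketch (values assumed): with total_length=5 and id_length=4,
#   mask256, mask1024, mask4096 = cal_attn_mask(5, 4, sa16=0.3, sa32=0.5, sa64=0.5)
# Each mask has shape (total_length*N, total_length*N) for N in {256, 1024, 4096}.
# The query tokens of image i may attend to a random subset (kept with probability
# sa16/sa32/sa64) of the first id_length images' tokens, and always to image i's own tokens.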
def cal_attn_mask_xl(total_length,id_length,sa32,sa64,height,width,device="cuda",dtype= torch.float16):
nums_1024 = (height // 32) * (width // 32)
nums_4096 = (height // 16) * (width // 16)
bool_matrix1024 = torch.rand((1, total_length * nums_1024),device = device,dtype = dtype) < sa32
bool_matrix4096 = torch.rand((1, total_length * nums_4096),device = device,dtype = dtype) < sa64
bool_matrix1024 = bool_matrix1024.repeat(total_length,1)
bool_matrix4096 = bool_matrix4096.repeat(total_length,1)
for i in range(total_length):
bool_matrix1024[i:i+1,id_length*nums_1024:] = False
bool_matrix4096[i:i+1,id_length*nums_4096:] = False
bool_matrix1024[i:i+1,i*nums_1024:(i+1)*nums_1024] = True
bool_matrix4096[i:i+1,i*nums_4096:(i+1)*nums_4096] = True
mask1024 = bool_matrix1024.unsqueeze(1).repeat(1,nums_1024,1).reshape(-1,total_length * nums_1024)
mask4096 = bool_matrix4096.unsqueeze(1).repeat(1,nums_4096,1).reshape(-1,total_length * nums_4096)
return mask1024,mask4096
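# SDXL variant: the token counts are derived from the latent resolution instead of being
# hard-coded, e.g. for a 768x768 generation nums_1024 = (768//32)**2 = 576 and
# nums_4096 = (768//16)**2 = 2304. Rough sketch of a call (values assumed):
#   mask1024, mask4096 = cal_attn_mask_xl(total_length, id_length, sa32, sa64, height=768, width=768)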
import copy
import os
from huggingface_hub import hf_hub_download
from diffusers.utils import load_image
from transformers.models.clip.modeling_clip import CLIPVisionModelWithProjection
import torch
import base64
import gradio as gr
import numpy as np
from PIL import Image,ImageOps,ImageDraw, ImageFont
from io import BytesIO
import random
MAX_COLORS = 12
def get_random_bool():
return random.choice([True, False])
def add_white_border(input_image, border_width=10):
border_color = 'white'
img_with_border = ImageOps.expand(input_image, border=border_width, fill=border_color)
return img_with_border
def process_mulline_text(draw, text, font, max_width):
"""
Draw the text on an image with word wrapping.
"""
lines = [] # Store the lines of text here
words = text.split()
# Start building lines of text, and wrap when necessary
current_line = ""
for word in words:
test_line = f"{current_line} {word}".strip()
# Check the width of the line with this word added
width, _ = draw.textsize(test_line, font=font)
if width <= max_width:
# If it fits, add this word to the current line
current_line = test_line
else:
# If not, store the line and start a new one
lines.append(current_line)
current_line = word
# Add the last line
lines.append(current_line)
return lines
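# Note (assumption about the pinned environment): draw.textsize, used above and in
# add_caption below, is a Pillow < 10 API; it was removed in Pillow 10, where
# draw.textlength / draw.textbbox are the replacements. This helper therefore assumes
# an older Pillow release is installed.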
def add_caption(image, text, position = "bottom-mid", font = None, text_color= 'black', bg_color = (255, 255, 255) , bg_opacity = 200):
if text == "":
return image
image = image.convert("RGBA")
draw = ImageDraw.Draw(image)
width, height = image.size
lines = process_mulline_text(draw,text,font,width)
text_positions = []
maxwidth = 0
for ind, line in enumerate(lines[::-1]):
text_width, text_height = draw.textsize(line, font=font)
if position == 'bottom-right':
text_position = (width - text_width - 10, height - (text_height + 20))
elif position == 'bottom-left':
text_position = (10, height - (text_height + 20))
elif position == 'bottom-mid':
text_position = ((width - text_width) // 2, height - (text_height + 20) )  # center the text horizontally
height = text_position[1]
maxwidth = max(maxwidth,text_width)
text_positions.append(text_position)
rectpos = (width - maxwidth) // 2
rectangle_position = [rectpos - 5, text_positions[-1][1] - 5, rectpos + maxwidth + 5, text_positions[0][1] + text_height + 5]
image_with_transparency = Image.new('RGBA', image.size)
draw_with_transparency = ImageDraw.Draw(image_with_transparency)
draw_with_transparency.rectangle(rectangle_position, fill=bg_color + (bg_opacity,))
image.paste(Image.alpha_composite(image.convert('RGBA'), image_with_transparency))
print(ind,text_position)
draw = ImageDraw.Draw(image)
for ind, line in enumerate(lines[::-1]):
text_position = text_positions[ind]
draw.text(text_position, line, fill=text_color, font=font)
return image.convert('RGB')
def get_comic(images,types = "4panel",captions = [],font = None,pad_image = None):
if pad_image is None:
pad_image = Image.open("./images/pad_images.png")
if font is None:
font = ImageFont.truetype("./fonts/Inkfree.ttf", int(30 * images[0].size[1] / 1024))
if types == "No typesetting (default)":
return images
elif types == "Four Pannel":
return get_comic_4panel(images,captions,font,pad_image)
else: # "Classic Comic Style"
return get_comic_classical(images,captions,font,pad_image)
def get_caption_group(images_groups,captions = []):
caption_groups = []
for i in range(len(images_groups)):
length = len(images_groups[i])
caption_groups.append(captions[:length])
captions = captions[length:]
if len(caption_groups[-1]) < len(images_groups[-1]):
caption_groups[-1] = caption_groups[-1] + [""] * (len(images_groups[-1]) - len(caption_groups[-1]))
return caption_groups
class MLP(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True):
super().__init__()
if use_residual:
assert in_dim == out_dim
self.layernorm = nn.LayerNorm(in_dim)
self.fc1 = nn.Linear(in_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.use_residual = use_residual
self.act_fn = nn.GELU()
def forward(self, x):
residual = x
x = self.layernorm(x)
x = self.fc1(x)
x = self.act_fn(x)
x = self.fc2(x)
if self.use_residual:
x = x + residual
return x
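# MLP is the small fuse network used inside FuseModule below; when use_residual=True the
# input and output widths must match (asserted in __init__) so the skip connection is well defined.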
def get_comic_classical(images,captions = None,font = None,pad_image = None):
if pad_image is None:
raise ValueError("pad_image is None")
images = [add_white_border(image) for image in images]
pad_image = pad_image.resize(images[0].size, Image.LANCZOS)
images_groups = distribute_images2(images,pad_image)
print(images_groups)
if captions is not None:
captions_groups = get_caption_group(images_groups,captions)
# print(images_groups)
row_images = []
for ind, img_group in enumerate(images_groups):
row_images.append(get_row_image2(img_group, captions=captions_groups[ind] if captions is not None else None, font=font))
return [combine_images_vertically_with_resize(row_images)]
class FuseModule(nn.Module):
def __init__(self, embed_dim):
super().__init__()
self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False)
self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True)
self.layer_norm = nn.LayerNorm(embed_dim)
def fuse_fn(self, prompt_embeds, id_embeds):
stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
stacked_id_embeds = self.mlp2(stacked_id_embeds)
stacked_id_embeds = self.layer_norm(stacked_id_embeds)
return stacked_id_embeds
def forward(
self,
prompt_embeds,
id_embeds,
class_tokens_mask,
) -> torch.Tensor:
# id_embeds shape: [b, max_num_inputs, 1, 2048]
id_embeds = id_embeds.to(prompt_embeds.dtype)
num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case
batch_size, max_num_inputs = id_embeds.shape[:2]
# seq_length: 77
seq_length = prompt_embeds.shape[1]
# flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
flat_id_embeds = id_embeds.view(
-1, id_embeds.shape[-2], id_embeds.shape[-1]
)
# valid_id_mask [b*max_num_inputs]
valid_id_mask = (
torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
< num_inputs[:, None]
)
valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
class_tokens_mask = class_tokens_mask.view(-1)
valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
# slice out the image token embeddings
image_token_embeds = prompt_embeds[class_tokens_mask]
stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
return updated_prompt_embeds
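# In short: FuseModule replaces the prompt-token embeddings at the positions marked by
# class_tokens_mask with a fusion of those embeddings and the stacked ID embeddings
# (mlp1 over the concatenation, a residual mlp2, then LayerNorm), leaving all other
# prompt tokens untouched.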
def get_comic_4panel(images,captions = [],font = None,pad_image = None):
if pad_image is None:
raise ValueError("pad_image is None")
pad_image = pad_image.resize(images[0].size, Image.LANCZOS)
images = [add_white_border(image) for image in images]
assert len(captions) == len(images)
for i,caption in enumerate(captions):
images[i] = add_caption(images[i],caption,font = font)
images_nums = len(images)
pad_nums = int((4 - images_nums % 4) % 4)
images = images + [pad_image for _ in range(pad_nums)]
comics = []
assert len(images)%4 == 0
for i in range(len(images)//4):
comics.append(combine_images_vertically_with_resize([combine_images_horizontally(images[i*4:i*4+2]), combine_images_horizontally(images[i*4+2:i*4+4])]))
return comics
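# Layout sketch: the image list is padded with pad_image up to a multiple of 4, and each
# group of 4 is arranged as a 2x2 grid (two horizontal pairs stacked vertically). For
# example, 6 input images yield two 2x2 pages, the second containing 2 pad panels.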
def get_row_image(images):
row_image_arr = []
if len(images)>3:
stack_img_nums = (len(images) - 2)//2
else:
stack_img_nums = 0
while(len(images)>0):
if stack_img_nums <=0:
row_image_arr.append(images[0])
images = images[1:]
elif len(images)>stack_img_nums*2:
if get_random_bool():
row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
images = images[2:]
stack_img_nums -=1
else:
row_image_arr.append(images[0])
images = images[1:]
else:
row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
images = images[2:]
stack_img_nums-=1
return combine_images_horizontally(row_image_arr)
def get_row_image2(images,captions = None, font = None):
row_image_arr = []
if len(images)== 6:
sequence_list = [1,1,2,2]
elif len(images)== 4:
sequence_list = [1,1,2]
else:
raise ValueError("images nums is not 4 or 6 found",len(images))
random.shuffle(sequence_list)
index = 0
for length in sequence_list:
if length == 1:
if captions is not None:
images_tmp = add_caption(images[0],text = captions[index],font= font)
else:
images_tmp = images[0]
row_image_arr.append( images_tmp)
images = images[1:]
index +=1
elif length == 2:
row_image_arr.append(concat_images_vertically_and_scale(images[:2]))
images = images[2:]
index +=2
return combine_images_horizontally(row_image_arr)
VISION_CONFIG_DICT = {
"hidden_size": 1024,
"intermediate_size": 4096,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"patch_size": 14,
"projection_dim": 768
}
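# These values match the standard CLIP ViT-L/14 vision configuration; PhotoMakerIDEncoder
# below uses them to build its CLIPVisionConfig locally instead of fetching one from the Hub.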
def concat_images_vertically_and_scale(images,scale_factor=2):
widths = [img.width for img in images]
if not all(width == widths[0] for width in widths):
raise ValueError('All images must have the same width.')
total_height = sum(img.height for img in images)
max_width = max(widths)
concatenated_image = Image.new('RGB', (max_width, total_height))
current_height = 0
for img in images:
concatenated_image.paste(img, (0, current_height))
current_height += img.height
new_height = concatenated_image.height // scale_factor
new_width = concatenated_image.width // scale_factor
resized_image = concatenated_image.resize((new_width, new_height), Image.LANCZOS)
return resized_image
def combine_images_horizontally(images):
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset, 0))
x_offset += im.width
return new_im
def combine_images_vertically_with_resize(images):
widths, heights = zip(*(i.size for i in images))
min_width = min(widths)
resized_images = []
for img in images:
new_height = int(min_width * img.height / img.width)
resized_img = img.resize((min_width, new_height), Image.LANCZOS)
resized_images.append(resized_img)
total_height = sum(img.height for img in resized_images)
new_im = Image.new('RGB', (min_width, total_height))
y_offset = 0
for im in resized_images:
new_im.paste(im, (0, y_offset))
y_offset += im.height
return new_im
def distribute_images2(images, pad_image):
groups = []
remaining = len(images)
if len(images) <= 8:
group_sizes = [4]
else:
group_sizes = [4, 6]
size_index = 0
while remaining > 0:
size = group_sizes[size_index%len(group_sizes)]
if remaining < size and remaining < min(group_sizes):
size = min(group_sizes)
if remaining > size:
new_group = images[-remaining: -remaining + size]
else:
new_group = images[-remaining:]
groups.append(new_group)
size_index += 1
remaining -= size
print(remaining,groups)
groups[-1] = groups[-1] + [pad_image for _ in range(-remaining)]
return groups
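# Worked example (illustrative): 10 images with group_sizes [4, 6] become groups of sizes
# [4, 6]; 5 images (group_sizes [4]) become [4, 1] and the trailing group is padded with
# 3 copies of pad_image, so every row group has 4 or 6 panels as required by get_row_image2.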
def distribute_images(images, group_sizes=(4, 3, 2)):
groups = []
remaining = len(images)
while remaining > 0:
for size in sorted(group_sizes, reverse=True):
if remaining >= size or remaining == len(images):
if remaining > size:
new_group = images[-remaining: -remaining + size]
else:
new_group = images[-remaining:]
groups.append(new_group)
remaining -= size
break
elif remaining < min(group_sizes) and groups:
groups[-1].extend(images[-remaining:])
remaining = 0
return groups
def create_binary_matrix(img_arr, target_color):
mask = np.all(img_arr == target_color, axis=-1)
binary_matrix = mask.astype(int)
return binary_matrix
def preprocess_mask(mask_, h, w, device):
mask = np.array(mask_)
mask = mask.astype(np.float32)
mask = mask[None, None]
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask).to(device)
mask = torch.nn.functional.interpolate(mask, size=(h, w), mode='nearest')
return mask
def process_sketch(canvas_data):
binary_matrixes = []
base64_img = canvas_data['image']
image_data = base64.b64decode(base64_img.split(',')[1])
image = Image.open(BytesIO(image_data)).convert("RGB")
im2arr = np.array(image)
colors = [tuple(map(int, rgb[4:-1].split(','))) for rgb in canvas_data['colors']]
colors_fixed = []
r, g, b = 255, 255, 255
binary_matrix = create_binary_matrix(im2arr, (r,g,b))
binary_matrixes.append(binary_matrix)
binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
for color in colors:
r, g, b = color
if any(c != 255 for c in (r, g, b)):
binary_matrix = create_binary_matrix(im2arr, (r,g,b))
binary_matrixes.append(binary_matrix)
binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
visibilities = []
colors = []
for n in range(MAX_COLORS):
visibilities.append(gr.update(visible=False))
colors.append(gr.update())
for n in range(len(colors_fixed)):
visibilities[n] = gr.update(visible=True)
colors[n] = colors_fixed[n]
return [gr.update(visible=True), binary_matrixes, *visibilities, *colors]
def process_prompts(binary_matrixes, *seg_prompts):
return [gr.update(visible=True), gr.update(value=' , '.join(seg_prompts[:len(binary_matrixes)]))]
def process_example(layout_path, all_prompts, seed_):
all_prompts = all_prompts.split('***')
binary_matrixes = []
colors_fixed = []
im2arr = np.array(Image.open(layout_path))[:,:,:3]
unique, counts = np.unique(np.reshape(im2arr,(-1,3)), axis=0, return_counts=True)
sorted_idx = np.argsort(-counts)
binary_matrix = create_binary_matrix(im2arr, (0,0,0))
binary_matrixes.append(binary_matrix)
binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
colored_map = binary_matrix_*(255,255,255) + (1-binary_matrix_)*(50,50,50)
colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
for i in range(len(all_prompts)-1):
r, g, b = unique[sorted_idx[i]]
if any(c != 255 for c in (r, g, b)) and any(c != 0 for c in (r, g, b)):
binary_matrix = create_binary_matrix(im2arr, (r,g,b))
binary_matrixes.append(binary_matrix)
binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
visibilities = []
colors = []
prompts = []
for n in range(MAX_COLORS):
visibilities.append(gr.update(visible=False))
colors.append(gr.update())
prompts.append(gr.update())
for n in range(len(colors_fixed)):
visibilities[n] = gr.update(visible=True)
colors[n] = colors_fixed[n]
prompts[n] = all_prompts[n+1]
return [gr.update(visible=True), binary_matrixes, *visibilities, *colors, *prompts,
gr.update(visible=True), gr.update(value=all_prompts[0]), int(seed_)]
style_list = [
{
"name": "(No style)",
"prompt": "{prompt}",
"negative_prompt": "",
},
{
"name": "Cinematic",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
"negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
},
{
"name": "Photographic",
"prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
"negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
}
]
styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
image_encoder_path = "./data/models/ip_adapter/sdxl_models/image_encoder"
ip_ckpt = "./data/models/ip_adapter/sdxl_models/ip-adapter_sdxl_vit-h.bin"
os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "Cinematic"
global models_dict
use_va = True
models_dict = {
# "Juggernaut": "RunDiffusion/Juggernaut-XL-v8",
"RealVision": "SG161222/RealVisXL_V4.0" ,
# "SDXL":"stabilityai/stable-diffusion-xl-base-1.0" ,
"Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
}
photomaker_path = hf_hub_download(repo_id="TencentARC/PhotoMaker", filename="photomaker-v1.bin", repo_type="model")
MAX_SEED = np.iinfo(np.int32).max
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def set_text_unfinished():
return gr.update(visible=True, value="<h3>(Not Finished) Generating ··· The intermediate results will be shown.</h3>")
def set_text_finished():
return gr.update(visible=True, value="<h3>Generation Finished</h3>")
#################################################
def get_image_path_list(folder_name):
image_basename_list = os.listdir(folder_name)
image_path_list = sorted([os.path.join(folder_name, basename) for basename in image_basename_list])
return image_path_list
#################################################
class SpatialAttnProcessor2_0(torch.nn.Module):
r"""
Attention processor for IP-Adapater for PyTorch 2.0.
Args:
hidden_size (`int`):
The hidden size of the attention layer.
cross_attention_dim (`int`):
The number of channels in the `encoder_hidden_states`.
text_context_len (`int`, defaults to 77):
The context length of the text features.
scale (`float`, defaults to 1.0):
the weight scale of image prompt.
"""
def __init__(self, hidden_size = None, cross_attention_dim=None,id_length = 4,device = "cuda",dtype = torch.float16):
super().__init__()
if not hasattr(F, "scaled_dot_product_attention"):
raise ImportError("SpatialAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
self.device = device
self.dtype = dtype
self.hidden_size = hidden_size
self.cross_attention_dim = cross_attention_dim
self.total_length = id_length + 1
self.id_length = id_length
self.id_bank = {}
def __call__(
self,
attn,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
temb=None):
# un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
# un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
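# Consistent self-attention bookkeeping: when the global `write` flag is set, the hidden
# states of the id_length reference images are cached in self.id_bank for the current
# denoising step; when it is cleared (the "read" phase), the cached states are concatenated
# to the current frame's hidden states so later frames can attend to the reference frames,
# gated by the random masks produced by cal_attn_mask_xl.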
global total_count,attn_count,cur_step,mask1024,mask4096
global sa32, sa64
global write
global height,width
global num_steps
if write:
# print(f"white:{cur_step}")
self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
else:
encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),hidden_states[:1],self.id_bank[cur_step][1].to(self.device),hidden_states[1:]))
if cur_step <=1:
hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
else: # 256 1024 4096
random_number = random.random()
if cur_step <0.4 * num_steps:
rand_num = 0.3
else:
rand_num = 0.1
# print(f"hidden state shape {hidden_states.shape[1]}")
if random_number > rand_num:
# print("mask shape",mask1024.shape,mask4096.shape)
if not write:
if hidden_states.shape[1] == (height//32) * (width//32):
attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]
else:
attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]
else:
# print(self.total_length,self.id_length,hidden_states.shape,(height//32) * (width//32))
if hidden_states.shape[1] == (height//32) * (width//32):
attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length,:mask1024.shape[0] // self.total_length * self.id_length]
else:
attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length,:mask4096.shape[0] // self.total_length * self.id_length]
# print(attention_mask.shape)
# print("before attention",hidden_states.shape,attention_mask.shape,encoder_hidden_states.shape if encoder_hidden_states is not None else "None")
hidden_states = self.__call1__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)
else:
hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
attn_count +=1
if attn_count == total_count:
attn_count = 0
cur_step += 1
mask1024,mask4096 = cal_attn_mask_xl(self.total_length,self.id_length,sa32,sa64,height,width, device=self.device, dtype= self.dtype)
return hidden_states
def __call1__(
self,
attn,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
temb=None,
):
# print("hidden state shape",hidden_states.shape,self.id_length)
residual = hidden_states
# if encoder_hidden_states is not None:
# raise Exception("not implement")
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
if input_ndim == 4:
total_batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(total_batch_size, channel, height * width).transpose(1, 2)
total_batch_size,nums_token,channel = hidden_states.shape
img_nums = total_batch_size//2
hidden_states = hidden_states.view(-1,img_nums,nums_token,channel).reshape(-1,img_nums * nums_token,channel)
batch_size, sequence_length, _ = hidden_states.shape
if attn.group_norm is not None:
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states # B, N, C
else:
encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,nums_token,channel).reshape(-1,(self.id_length+1) * nums_token,channel)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
# print(key.shape,value.shape,query.shape,attention_mask.shape)
# the output of sdp = (batch, num_heads, seq_len, head_dim)
# TODO: add support for attn.scale when we move to Torch 2.1
#print(query.shape,key.shape,value.shape,attention_mask.shape)
hidden_states = F.scaled_dot_product_attention(
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
)
hidden_states = hidden_states.transpose(1, 2).reshape(total_batch_size, -1, attn.heads * head_dim)
hidden_states = hidden_states.to(query.dtype)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
# if input_ndim == 4:
# tile_hidden_states = tile_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
# if attn.residual_connection:
# tile_hidden_states = tile_hidden_states + residual
if input_ndim == 4:
hidden_states = hidden_states.transpose(-1, -2).reshape(total_batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
# print(hidden_states.shape)
return hidden_states
def __call2__(
self,
attn,
hidden_states,
encoder_hidden_states=None,
attention_mask=None,
temb=None):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, channel = (
hidden_states.shape
)
# print(hidden_states.shape)
if attention_mask is not None:
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
# scaled_dot_product_attention expects attention_mask shape to be
# (batch, heads, source_length, target_length)
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
if attn.group_norm is not None:
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states # B, N, C
else:
encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,sequence_length,channel).reshape(-1,(self.id_length+1) * sequence_length,channel)
key = attn.to_k(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states)
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
# the output of sdp = (batch, num_heads, seq_len, head_dim)
# TODO: add support for attn.scale when we move to Torch 2.1
hidden_states = F.scaled_dot_product_attention(
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
hidden_states = hidden_states.to(query.dtype)
# linear proj
hidden_states = attn.to_out[0](hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
def set_attention_processor(unet,id_length,is_ipadapter = False):
global total_count
total_count = 0
attn_procs = {}
for name in unet.attn_processors.keys():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
if cross_attention_dim is None:
if name.startswith("up_blocks") :
attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length)
total_count +=1
else:
attn_procs[name] = AttnProcessor()
else:
if is_ipadapter:
attn_procs[name] = IPAttnProcessor2_0(
hidden_size=hidden_size,
cross_attention_dim=cross_attention_dim,
scale=1,
num_tokens=4,
).to(unet.device, dtype=torch.float16)
else:
attn_procs[name] = AttnProcessor()
unet.set_attn_processor(copy.deepcopy(attn_procs))
print("successsfully load paired self-attention")
print(f"number of the processor : {total_count}")
canvas_html = "<div id='canvas-root' style='max-width:400px; margin: 0 auto'></div>"
load_js = """
async () => {
const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/sketch-canvas.js"
fetch(url)
.then(res => res.text())
.then(text => {
const script = document.createElement('script');
script.type = "module"
script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
document.head.appendChild(script);
});
}
"""
get_js_colors = """
async (canvasData) => {
const canvasEl = document.getElementById("canvas-root");
return [canvasEl._data]
}
"""
css = '''
#color-bg{display:flex;justify-content: center;align-items: center;}
.color-bg-item{width: 100%; height: 32px}
#main_button{width:100%}
'''
#################################################
title = r"""
<h1 align="center">Demo for Consistent Self-Attention for Long-Range Image and Video Generation</h1>
"""
description = r"""
1️⃣ Enter a textual description of the character. If you add a reference image, make sure to <b>follow the class word</b> you want to customize with the <b>trigger word</b> `img`, e.g. `man img`, `woman img`, or `girl img`.<br>
2️⃣ Enter the prompt array; each line corresponds to one generated image.<br>
3️⃣ Choose your preferred style template.<br>
4️⃣ Click the <b>Submit</b> button to start customizing.
"""
version = r"""
<h3 align="center">StoryDiffusion Version 0.01 (test version)</h3>
<h5>1. Supports reference images. (Cartoon reference images are not supported yet.)</h5>
<h5>2. Supports typesetting styles and captioning. (By default, the prompt is used as the caption for each image. To change a caption, add a # at the end of the line; only the part after the # is used as that image's caption.)</h5>
<h5>3. [NC] symbol: prepend "[NC]" to a line to indicate that no characters should appear in that scene. For example, to generate a scene of falling leaves without any character, write: "[NC] The leaves are falling." Currently this is only supported when using a textual description.</h5>
<h5>Tips: Not ready yet, just a test! It is better to use prompts to help control the character's attire. Given the limited code-integration time, there may be undiscovered bugs. If a particular generation result is significantly poor, please email me (ypzhousdu@gmail.com). Thank you very much.</h5>
"""
#################################################
global attn_count, total_count, id_length, total_length,cur_step, cur_model_type
global write
global sa32, sa64
global height,width
attn_count = 0
total_count = 0
cur_step = 0
id_length = 4
total_length = 5
cur_model_type = ""
device="cuda"
global attn_procs,unet
attn_procs = {}
###
write = False
###
sa32 = 0.5
sa64 = 0.5
height = 768
width = 768
###
global sd_model_path
sd_model_path = models_dict["Unstable"]#"SG161222/RealVisXL_V4.0"
use_safetensors= False
### LOAD Stable Diffusion Pipeline
# pipe1 = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors= use_safetensors)
# pipe1 = pipe1.to("cpu")
# pipe1.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
# # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe1.scheduler.set_timesteps(50)
###
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
from collections import OrderedDict
import os
import PIL
import numpy as np
import torch
from torchvision import transforms as T
from safetensors import safe_open
from huggingface_hub.utils import validate_hf_hub_args
from transformers import CLIPImageProcessor, CLIPTokenizer
from diffusers import StableDiffusionXLPipeline
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.utils import (
_get_model_file,
is_transformers_available,
logging,
)
class PhotoMakerIDEncoder(CLIPVisionModelWithProjection):
def __init__(self):
super().__init__(CLIPVisionConfig(**VISION_CONFIG_DICT))
self.visual_projection_2 = nn.Linear(1024, 1280, bias=False)
self.fuse_module = FuseModule(2048)
def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
b, num_inputs, c, h, w = id_pixel_values.shape
id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
shared_id_embeds = self.vision_model(id_pixel_values)[1]
id_embeds = self.visual_projection(shared_id_embeds)
id_embeds_2 = self.visual_projection_2(shared_id_embeds)
id_embeds = id_embeds.view(b, num_inputs, 1, -1)
id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)
id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
return updated_prompt_embeds
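# Shape note: visual_projection maps the shared CLIP embedding to 768 dims and
# visual_projection_2 to 1280 dims; their concatenation gives the 2048-dim ID embedding
# expected by FuseModule(2048).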
PipelineImageInput = Union[
PIL.Image.Image,
torch.FloatTensor,
List[PIL.Image.Image],
List[torch.FloatTensor],
]
class PhotoMakerStableDiffusionXLPipeline(StableDiffusionXLPipeline):
@validate_hf_hub_args
def load_photomaker_adapter(
self,
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
weight_name: str,
subfolder: str = '',
trigger_word: str = 'img',
**kwargs,
):
"""
Parameters:
pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
Can be either:
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
with [`ModelMixin.save_pretrained`].
- A [torch state
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
weight_name (`str`):
The name of the weight file (not its path).
subfolder (`str`, defaults to `""`):
The subfolder location of a model file within a larger model repository on the Hub or locally.
trigger_word (`str`, *optional*, defaults to `"img"`):
The trigger word used to identify the position of the class word in the text prompt;
it is recommended not to use a common word as the trigger word.
The trigger word must be placed right after the class word; otherwise it will degrade the quality of the personalized generation.
"""
# Load the main state dict first.
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", None)
token = kwargs.pop("token", None)
revision = kwargs.pop("revision", None)
user_agent = {
"file_type": "attn_procs_weights",
"framework": "pytorch",
}
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
model_file = _get_model_file(
pretrained_model_name_or_path_or_dict,
weights_name=weight_name,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
subfolder=subfolder,
user_agent=user_agent,
)
if weight_name.endswith(".safetensors"):
state_dict = {"id_encoder": {}, "lora_weights": {}}
with safe_open(model_file, framework="pt", device="cpu") as f:
for key in f.keys():
if key.startswith("id_encoder."):
state_dict["id_encoder"][key.replace("id_encoder.", "")] = f.get_tensor(key)
elif key.startswith("lora_weights."):
state_dict["lora_weights"][key.replace("lora_weights.", "")] = f.get_tensor(key)
else:
state_dict = torch.load(model_file, map_location="cpu")
else:
state_dict = pretrained_model_name_or_path_or_dict
keys = list(state_dict.keys())
if keys != ["id_encoder", "lora_weights"]:
raise ValueError("Required keys are (`id_encoder` and `lora_weights`) missing from the state dict.")
self.trigger_word = trigger_word
# load finetuned CLIP image encoder and fuse module here if it has not been registered to the pipeline yet
print(f"Loading PhotoMaker components [1] id_encoder from [{pretrained_model_name_or_path_or_dict}]...")
id_encoder = PhotoMakerIDEncoder()
id_encoder.load_state_dict(state_dict["id_encoder"], strict=True)
id_encoder = id_encoder.to(self.device, dtype=self.unet.dtype)
self.id_encoder = id_encoder
self.id_image_processor = CLIPImageProcessor()
# load lora into models
print(f"Loading PhotoMaker components [2] lora_weights from [{pretrained_model_name_or_path_or_dict}]")
self.load_lora_weights(state_dict["lora_weights"], adapter_name="photomaker")
# Add trigger word token
if self.tokenizer is not None:
self.tokenizer.add_tokens([self.trigger_word], special_tokens=True)
self.tokenizer_2.add_tokens([self.trigger_word], special_tokens=True)
def encode_prompt_with_trigger_word(
self,
prompt: str,
prompt_2: Optional[str] = None,
num_id_images: int = 1,
device: Optional[torch.device] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
class_tokens_mask: Optional[torch.LongTensor] = None,
):
device = device or self._execution_device
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Find the token id of the trigger word
image_token_id = self.tokenizer_2.convert_tokens_to_ids(self.trigger_word)
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
input_ids = tokenizer.encode(prompt) # TODO: batch encode
clean_index = 0
clean_input_ids = []
class_token_index = []
# Find out the corresponding class word token based on the newly added trigger word token
for i, token_id in enumerate(input_ids):
if token_id == image_token_id:
class_token_index.append(clean_index - 1)
else:
clean_input_ids.append(token_id)
clean_index += 1
if len(class_token_index) != 1:
raise ValueError(
f"PhotoMaker currently does not support multiple trigger words in a single prompt.\
Trigger word: {self.trigger_word}, Prompt: {prompt}."
)
class_token_index = class_token_index[0]
# Expand the class word token and corresponding mask
class_token = clean_input_ids[class_token_index]
clean_input_ids = clean_input_ids[:class_token_index] + [class_token] * num_id_images + \
clean_input_ids[class_token_index+1:]
# Truncation or padding
max_len = tokenizer.model_max_length
if len(clean_input_ids) > max_len:
clean_input_ids = clean_input_ids[:max_len]
else:
clean_input_ids = clean_input_ids + [tokenizer.pad_token_id] * (
max_len - len(clean_input_ids)
)
class_tokens_mask = [True if class_token_index <= i < class_token_index+num_id_images else False \
for i in range(len(clean_input_ids))]
clean_input_ids = torch.tensor(clean_input_ids, dtype=torch.long).unsqueeze(0)
class_tokens_mask = torch.tensor(class_tokens_mask, dtype=torch.bool).unsqueeze(0)
prompt_embeds = text_encoder(
clean_input_ids.to(device),
output_hidden_states=True,
)
# We are only ALWAYS interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
class_tokens_mask = class_tokens_mask.to(device=device) # TODO: ignoring two-prompt case
return prompt_embeds, pooled_prompt_embeds, class_tokens_mask
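# Trigger-word sketch (illustrative prompt): for "a man img sitting in a park" with
# num_id_images=2, the `img` token is removed, the preceding class token ("man") is
# replaced by num_id_images copies of itself, and class_tokens_mask marks those positions
# so FuseModule knows where to inject the stacked ID embeddings.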
@property
def interrupt(self):
return self._interrupt
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
# Added parameters (for PhotoMaker)
input_id_images: PipelineImageInput = None,
start_merge_step: int = 0, # TODO: change to `style_strength_ratio` in the future
class_tokens_mask: Optional[torch.LongTensor] = None,
prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds_text_only: Optional[torch.FloatTensor] = None,
):
r"""
Function invoked when calling the pipeline for generation.
Only the parameters introduced by PhotoMaker are discussed here.
For explanations of the previous parameters in StableDiffusionXLPipeline, please refer to https://github.com/huggingface/diffusers/blob/v0.25.0/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py
Args:
input_id_images (`PipelineImageInput`, *optional*):
Input ID Image to work with PhotoMaker.
class_tokens_mask (`torch.LongTensor`, *optional*):
Pre-generated class-token mask. When `prompt_embeds` is provided in advance, `class_tokens_mask` must also be prepared beforehand to mark the positions of the class word.
prompt_embeds_text_only (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
pooled_prompt_embeds_text_only (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
Returns:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
prompt_2,
height,
width,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
callback_on_step_end_tensor_inputs,
)
self._interrupt = False
#
if prompt_embeds is not None and class_tokens_mask is None:
raise ValueError(
"If `prompt_embeds` are provided, `class_tokens_mask` also have to be passed. Make sure to generate `class_tokens_mask` from the same tokenizer that was used to generate `prompt_embeds`."
)
# check the input id images
if input_id_images is None:
raise ValueError(
"Provide `input_id_images`. Cannot leave `input_id_images` undefined for PhotoMaker pipeline."
)
if not isinstance(input_id_images, list):
input_id_images = [input_id_images]
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
prompt = [prompt]
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale >= 1.0
assert do_classifier_free_guidance
# 3. Encode input prompt
num_id_images = len(input_id_images)
if isinstance(prompt, list):
prompt_arr = prompt
negative_prompt_embeds_arr = []
prompt_embeds_text_only_arr = []
prompt_embeds_arr = []
latents_arr = []
add_time_ids_arr = []
negative_pooled_prompt_embeds_arr = []
pooled_prompt_embeds_text_only_arr = []
pooled_prompt_embeds_arr = []
for prompt in prompt_arr:
(
prompt_embeds,
pooled_prompt_embeds,
class_tokens_mask,
) = self.encode_prompt_with_trigger_word(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_id_images=num_id_images,
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
class_tokens_mask=class_tokens_mask,
)
# 4. Encode input prompt without the trigger word for delayed conditioning
# encode, remove trigger word token, then decode
tokens_text_only = self.tokenizer.encode(prompt, add_special_tokens=False)
trigger_word_token = self.tokenizer.convert_tokens_to_ids(self.trigger_word)
tokens_text_only.remove(trigger_word_token)
prompt_text_only = self.tokenizer.decode(tokens_text_only, add_special_tokens=False)
print(prompt_text_only)
(
prompt_embeds_text_only,
negative_prompt_embeds,
pooled_prompt_embeds_text_only, # TODO: replace the pooled_prompt_embeds with text only prompt
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt_text_only,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=True,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds_text_only,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds_text_only,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
)
# 5. Prepare the input ID images
dtype = next(self.id_encoder.parameters()).dtype
if not isinstance(input_id_images[0], torch.Tensor):
id_pixel_values = self.id_image_processor(input_id_images, return_tensors="pt").pixel_values
id_pixel_values = id_pixel_values.unsqueeze(0).to(device=device, dtype=dtype) # TODO: multiple prompts
# 6. Get the update text embedding with the stacked ID embedding
prompt_embeds = self.id_encoder(id_pixel_values, prompt_embeds, class_tokens_mask)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
negative_prompt_embeds_arr.append(negative_prompt_embeds)
negative_prompt_embeds = None
negative_pooled_prompt_embeds_arr.append(negative_pooled_prompt_embeds)
negative_pooled_prompt_embeds = None
prompt_embeds_text_only_arr.append(prompt_embeds_text_only)
prompt_embeds_text_only = None
prompt_embeds_arr.append(prompt_embeds)
prompt_embeds = None
pooled_prompt_embeds_arr.append(pooled_prompt_embeds)
pooled_prompt_embeds = None
pooled_prompt_embeds_text_only_arr.append(pooled_prompt_embeds_text_only)
pooled_prompt_embeds_text_only = None
# 7. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
negative_prompt_embeds = torch.cat(negative_prompt_embeds_arr ,dim =0)
print(negative_prompt_embeds.shape)
prompt_embeds = torch.cat(prompt_embeds_arr ,dim = 0)
print(prompt_embeds.shape)
prompt_embeds_text_only = torch.cat(prompt_embeds_text_only_arr ,dim = 0)
print(prompt_embeds_text_only.shape)
pooled_prompt_embeds_text_only = torch.cat(pooled_prompt_embeds_text_only_arr ,dim = 0)
print(pooled_prompt_embeds_text_only.shape)
negative_pooled_prompt_embeds = torch.cat(negative_pooled_prompt_embeds_arr ,dim = 0)
print(negative_pooled_prompt_embeds.shape)
pooled_prompt_embeds = torch.cat(pooled_prompt_embeds_arr,dim = 0)
print(pooled_prompt_embeds.shape)
# 8. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 10. Prepare added time ids & embeddings
if self.text_encoder_2 is None:
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
else:
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
add_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
print(latents.shape)
print(add_time_ids.shape)
# 11. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
if self.interrupt:
continue
latent_model_input = (
torch.cat([latents] * 2) if do_classifier_free_guidance else latents
)
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
if i <= start_merge_step:
current_prompt_embeds = torch.cat(
[negative_prompt_embeds, prompt_embeds_text_only], dim=0
)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds_text_only], dim=0)
else:
current_prompt_embeds = torch.cat(
[negative_prompt_embeds, prompt_embeds], dim=0
)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# print(latent_model_input.shape)
# print(t)
# print(current_prompt_embeds.shape)
# print(add_text_embeds.shape)
# print(add_time_ids.shape)
#zeros_matrix =
#global_mask1024 = torch.cat([torch.randn(1, 1024, 1, 1, device=device) for random_number])
#global_mask4096 =
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=current_prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# print(noise_pred.shape)
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
# negative_pooled_prompt_embeds = callback_outputs.pop(
# "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
# )
# add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
# negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
# make sure the VAE is in float32 mode, as it overflows in float16
if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
self.upcast_vae()
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
# apply watermark if available
# if self.watermark is not None:
# image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
models_dict["Unstable"], torch_dtype=torch.float16, use_safetensors=use_safetensors)
pipe2 = pipe2.to("cpu")
pipe2.load_photomaker_adapter(
os.path.dirname(photomaker_path),
subfolder="",
weight_name=os.path.basename(photomaker_path),
trigger_word="img" # define the trigger word
)
pipe2 = pipe2.to("cpu")
pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
pipe2.fuse_lora()
pipe4 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
models_dict["RealVision"], torch_dtype=torch.float16, use_safetensors=True)
pipe4 = pipe4.to("cpu")
pipe4.load_photomaker_adapter(
os.path.dirname(photomaker_path),
subfolder="",
weight_name=os.path.basename(photomaker_path),
trigger_word="img" # define the trigger word
)
pipe4 = pipe4.to("cpu")
pipe4.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
pipe4.fuse_lora()
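# Both PhotoMaker pipes are built once at startup and parked on the CPU; process_generation()
# moves the selected one to the GPU only for the duration of a request (the usual pattern for
# ZeroGPU Spaces, where a GPU is attached per call via @spaces.GPU). enable_freeu() re-weights
# the UNet backbone/skip features, and fuse_lora() bakes the LoRA weights loaded with the
# PhotoMaker adapter into the base model so no adapter switching is needed at inference time.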
# pipe3 = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0", torch_dtype=torch.float16)
# pipe3 = pipe3.to("cpu")
# pipe3.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
# # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe3.scheduler.set_timesteps(50)
######### Gradio Functions
def swap_to_gallery(images):
return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)
def upload_example_to_gallery(images, prompt, style, negative_prompt):
return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)
def remove_back_to_files():
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
def remove_tips():
return gr.update(visible=False)
def apply_style_positive(style_name: str, positive: str):
p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
return p.replace("{prompt}", positive)
def apply_style(style_name: str, positives: list, negative: str = ""):
p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
return [p.replace("{prompt}", positive) for positive in positives], n + ' ' + negative
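# apply_style() substitutes each positive prompt into the template's "{prompt}" slot and appends
# the template's negative terms. E.g. a hypothetical template "comic {prompt} . graphic illustration"
# turns "a man img walking" into "comic a man img walking . graphic illustration".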
def change_visible_by_model_type(_model_type):
if _model_type == "Only Using Textual Description":
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
elif _model_type == "Using Ref Images":
return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
else:
raise ValueError("Invalid model type",_model_type)
@spaces.GPU(duration=120)
def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_name, _Ip_Adapter_Strength ,_style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt,prompt_array,G_height,G_width,_comic_type):
_model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
if _model_type == "Photomaker" and "img" not in general_prompt:
raise gr.Error("Please add the triger word \" img \" behind the class word you want to customize, such as: man img or woman img")
if _upload_images is None and _model_type != "original":
raise gr.Error(f"Cannot find any input face image!")
if len(prompt_array.splitlines()) > 10:
raise gr.Error(f"No more than 10 prompts in huggface demo for Speed! But found {len(prompt_array.splitlines())} prompts!")
global sa32, sa64,id_length,total_length,attn_procs,unet,cur_model_type,device
global num_steps
global write
global cur_step,attn_count
global height,width
height = G_height
width = G_width
global pipe2,pipe4
global sd_model_path,models_dict
sd_model_path = models_dict[_sd_type]
num_steps =_num_steps
use_safe_tensor = True
if style_name == "(No style)":
sd_model_path = models_dict["RealVision"]
if _model_type == "original":
pipe = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16)
pipe = pipe.to(device)
pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe.scheduler.set_timesteps(50)
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
elif _model_type == "Photomaker":
if _sd_type != "RealVision" and style_name != "(No style)":
pipe = pipe2.to(device)
pipe.id_encoder.to(device)
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
else:
pipe = pipe4.to(device)
pipe.id_encoder.to(device)
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
else:
raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
cur_model_type = _sd_type + "-" + _model_type + str(id_length_)
if _model_type != "original":
input_id_images = []
for img in _upload_images:
print(img)
input_id_images.append(load_image(img))
prompts = prompt_array.splitlines()
start_merge_step = int(float(_style_strength_ratio) / 100 * _num_steps)
if start_merge_step > 30:
start_merge_step = 30
print(f"start_merge_step:{start_merge_step}")
generator = torch.Generator(device="cuda").manual_seed(seed_)
sa32, sa64 = sa32_, sa64_
id_length = id_length_
clipped_prompts = prompts[:]
prompts = [general_prompt + "," + prompt if "[NC]" not in prompt else prompt.replace("[NC]","") for prompt in clipped_prompts]
prompts = [prompt.rpartition('#')[0] if "#" in prompt else prompt for prompt in prompts]
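# Per-frame prompt preprocessing: the character description is prepended to every line unless it
# is tagged "[NC]" (no character); anything after "#" is kept only as a caption for the comic
# typesetting step and stripped from the generation prompt.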
print(prompts)
id_prompts = prompts[:id_length]
real_prompts = prompts[id_length:]
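# Two-phase generation: the first `id_length` prompts are generated as one batch with write=True,
# so the consistent self-attention processors cache the ID frames' features; the remaining frames
# are then generated one at a time with write=False, reusing that cache to keep the character
# consistent across panels.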
torch.cuda.empty_cache()
write = True
cur_step = 0
attn_count = 0
id_prompts, negative_prompt = apply_style(style_name, id_prompts, negative_prompt)
setup_seed(seed_)
total_results = []
if _model_type == "original":
id_images = pipe(id_prompts, num_inference_steps=_num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images
elif _model_type == "Photomaker":
id_images = pipe(id_prompts,input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale, start_merge_step = start_merge_step, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images
else:
raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
total_results = id_images + total_results
yield total_results
real_images = []
write = False
for real_prompt in real_prompts:
setup_seed(seed_)
cur_step = 0
real_prompt = apply_style_positive(style_name, real_prompt)
if _model_type == "original":
real_images.append(pipe(real_prompt, num_inference_steps=_num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])
elif _model_type == "Photomaker":
real_images.append(pipe(real_prompt, input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale, start_merge_step = start_merge_step, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])
else:
raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
total_results = [real_images[-1]] + total_results
yield total_results
if _comic_type != "No typesetting (default)":
captions= prompt_array.splitlines()
captions = [caption.replace("[NC]","") for caption in captions]
captions = [caption.split('#')[-1] if "#" in caption else caption for caption in captions]
from PIL import ImageFont
total_results = get_comic(id_images + real_images, _comic_type,captions= captions,font=ImageFont.truetype("./fonts/Inkfree.ttf", int(45))) + total_results
if _model_type == "Photomaker":
pipe = pipe2.to("cpu")
pipe.id_encoder.to("cpu")
set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
yield total_results
def array2string(arr):
return "\n".join(arr)
with gr.Blocks(css=css) as demo:
binary_matrixes = gr.State([])
color_layout = gr.State([])
gr.Markdown(title)
gr.Markdown(description)
with gr.Row():
with gr.Group(elem_id="main-image"):
prompts = []
colors = []
with gr.Column(visible=True) as gen_prompt_vis:
sd_type = gr.Dropdown(choices=list(models_dict.keys()), value = "Unstable",label="sd_type", info="Select pretrained model")
model_type = gr.Radio(["Only Using Textual Description", "Using Ref Images"], label="model_type", value = "Only Using Textual Description", info="Control type of the Character")
with gr.Group(visible=False) as control_image_input:
files = gr.Files(
label="Drag (Select) 1 or more photos of your face",
file_types=["image"],
)
uploaded_files = gr.Gallery(label="Your images", visible=False, columns=5, rows=1, height=200)
with gr.Column(visible=False) as clear_button:
remove_and_reupload = gr.ClearButton(value="Remove and upload new ones", components=files, size="sm")
general_prompt = gr.Textbox(value='', label="(1) Textual Description for Character", interactive=True)
negative_prompt = gr.Textbox(value='', label="(2) Negative prompt", interactive=True)
style = gr.Dropdown(label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
prompt_array = gr.Textbox(lines = 3,value='', label="(3) Comic Description (each line corresponds to a frame).", interactive=True)
with gr.Accordion("(4) Tune the hyperparameters", open=True):
#sa16_ = gr.Slider(label=" (The degree of Paired Attention at 16 x 16 self-attention layers) ", minimum=0, maximum=1., value=0.3, step=0.1)
sa32_ = gr.Slider(label=" (The degree of Paired Attention at 32 x 32 self-attention layers) ", minimum=0, maximum=1., value=0.7, step=0.1)
sa64_ = gr.Slider(label=" (The degree of Paired Attention at 64 x 64 self-attention layers) ", minimum=0, maximum=1., value=0.7, step=0.1)
id_length_ = gr.Slider(label="Number of ID (reference) images among the total images", minimum=2, maximum=4, value=3, step=1)
# total_length_ = gr.Slider(label= "Number of total images", minimum=1, maximum=20, value=1, step=1)
seed_ = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, value=0, step=1)
num_steps = gr.Slider(
label="Number of sample steps",
minimum=25,
maximum=50,
step=1,
value=50,
)
G_height = gr.Slider(
label="height",
minimum=256,
maximum=1024,
step=32,
value=1024,
)
G_width = gr.Slider(
label="width",
minimum=256,
maximum=1024,
step=32,
value=1024,
)
comic_type = gr.Radio(["No typesetting (default)", "Four Pannel", "Classic Comic Style"], value = "Classic Comic Style", label="Typesetting Style", info="Select the typesetting style ")
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.1,
maximum=10.0,
step=0.1,
value=5,
)
style_strength_ratio = gr.Slider(
label="Style strength of Ref Image (%)",
minimum=15,
maximum=50,
step=1,
value=20,
visible=False
)
Ip_Adapter_Strength = gr.Slider(
label="Ip_Adapter_Strength",
minimum=0,
maximum=1,
step=0.1,
value=0.5,
visible=False
)
final_run_btn = gr.Button("Generate ! 😺")
with gr.Column():
out_image = gr.Gallery(label="Result", columns=2, height='auto')
generated_information = gr.Markdown(label="Generation Details", value="",visible=False)
gr.Markdown(version)
model_type.change(fn=change_visible_by_model_type, inputs=model_type, outputs=[control_image_input,style_strength_ratio,Ip_Adapter_Strength])
files.upload(fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files])
remove_and_reupload.click(fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files])
final_run_btn.click(fn=set_text_unfinished, outputs = generated_information
).then(process_generation, inputs=[sd_type,model_type,files, num_steps,style, Ip_Adapter_Strength,style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt, prompt_array,G_height,G_width,comic_type], outputs=out_image
).then(fn=set_text_finished,outputs = generated_information)
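# Click chain: show the "generating" notice, stream results from process_generation (a generator,
# so the gallery updates after the ID batch and after each additional frame), then swap in the
# finished notice.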
#gr.Markdown(article)
demo.launch()