iciromaco committed on
Commit
8cb7e17
1 Parent(s): 0c510aa
Files changed (1)
  1. app.py +128 -0
app.py CHANGED
@@ -2,6 +2,134 @@
import numpy as np
import gradio as gr

+ import argparse
+ import itertools
+ import math
+ import os
+ import random
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch.utils.data import Dataset
+
+ import PIL
+ from accelerate import Accelerator
+ from accelerate.logging import get_logger
+ from accelerate.utils import set_seed
+ from diffusers import StableDiffusionImg2ImgPipeline  # modified here
+ from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, UNet2DConditionModel
+ from diffusers.hub_utils import init_git_repo, push_to_hub
+ from diffusers.optimization import get_scheduler
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
+ from PIL import Image
+ from torchvision import transforms
+ from tqdm.auto import tqdm
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+
+ pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4" #@param {type:"string"}
+
+ from IPython.display import Markdown
+ from huggingface_hub import hf_hub_download
+ #@title Load your concept here
+ #@markdown Enter the `repo_id` for a concept you like (you can find pre-learned concepts in the public [SD Concepts Library](https://huggingface.co/sd-concepts-library))
+ repo_id_embeds = "sd-concepts-library/mikako-methodi2i" #@param {type:"string"}
+
+
+ #@markdown (Optional) in case you have a `learned_embeds.bin` file and not a `repo_id`, add the path to `learned_embeds.bin` to the `embeds_url` variable
+ embeds_url = "" # Add the URL or path to a learned_embeds.bin file in case you have one
+ placeholder_token_string = "" # Add the token string in case you are uploading your own embed
+
+ downloaded_embedding_folder = "./downloaded_embedding"
+ if not os.path.exists(downloaded_embedding_folder):
+     os.mkdir(downloaded_embedding_folder)
+ if(not embeds_url):
+     embeds_path = hf_hub_download(repo_id=repo_id_embeds, filename="learned_embeds.bin", use_auth_token=True)
+     token_path = hf_hub_download(repo_id=repo_id_embeds, filename="token_identifier.txt", use_auth_token=True)
+     !cp $embeds_path $downloaded_embedding_folder
+     !cp $token_path $downloaded_embedding_folder
+     with open(f'{downloaded_embedding_folder}/token_identifier.txt', 'r') as file:
+         placeholder_token_string = file.read()
+ else:
+     !wget -q -O $downloaded_embedding_folder/learned_embeds.bin $embeds_url
+
+ learned_embeds_path = f"{downloaded_embedding_folder}/learned_embeds.bin"
+
+ display(Markdown("## The placeholder token for your concept is `%s`" % (placeholder_token_string)))
+
+ def image_grid(imgs, rows, cols):
+     assert len(imgs) == rows*cols
+
+     w, h = imgs[0].size
+     grid = Image.new('RGB', size=(cols*w, rows*h))
+     grid_w, grid_h = grid.size
+
+     for i, img in enumerate(imgs):
+         grid.paste(img, box=(i%cols*w, i//cols*h))
+     return grid
+
+ #@title Set up the Tokenizer and the Text Encoder
+ tokenizer = CLIPTokenizer.from_pretrained(
+     pretrained_model_name_or_path,
+     subfolder="tokenizer",
+     use_auth_token=True,
+ )
+ text_encoder = CLIPTextModel.from_pretrained(
+     pretrained_model_name_or_path, subfolder="text_encoder", use_auth_token=True
+ )
+
+ #@title Load the newly learned embeddings into CLIP
+ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
+     loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
+
+     # separate token and the embeds
+     trained_token = list(loaded_learned_embeds.keys())[0]
+     embeds = loaded_learned_embeds[trained_token]
+
+     # cast to dtype of text_encoder
+     dtype = text_encoder.get_input_embeddings().weight.dtype
+     embeds = embeds.to(dtype)
+
+     # add the token in tokenizer
+     token = token if token is not None else trained_token
+     num_added_tokens = tokenizer.add_tokens(token)
+     if num_added_tokens == 0:
+         raise ValueError(f"The tokenizer already contains the token {token}. Please pass a different `token` that is not already in the tokenizer.")
+
+     # resize the token embeddings
+     text_encoder.resize_token_embeddings(len(tokenizer))
+
+     # get the id for the token and assign the embeds
+     token_id = tokenizer.convert_tokens_to_ids(token)
+     text_encoder.get_input_embeddings().weight.data[token_id] = embeds
+ load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer)
+
+ def crop_center(pil_img, crop_width, crop_height):
+     img_width, img_height = pil_img.size
+     return pil_img.crop(((img_width - crop_width) // 2,
+                          (img_height - crop_height) // 2,
+                          (img_width + crop_width) // 2,
+                          (img_height + crop_height) // 2))
+
+ #@title Run the Stable Diffusion pipeline
+ #@markdown Don't forget to use the placeholder token in your prompt
+
+ from torch import autocast
+
+ #pipe = StableDiffusionPipeline.from_pretrained(
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+     pretrained_model_name_or_path,
+     torch_dtype=torch.float16,
+     text_encoder=text_encoder,
+     tokenizer=tokenizer,
+     use_auth_token=True,
+     crop=True,
+ ).to("cuda")
+ pipe.enable_attention_slicing()
+
+
+
def inmm(init_image, prompt):
    (w,h) = init_image.size
    if w>h :
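
Note: the added hunk keeps several notebook-only constructs (the `!cp` and `!wget` shell magics and IPython's `display(Markdown(...))`), which plain Python outside a notebook cannot execute. Below is a minimal sketch of portable stand-ins for those three steps, reusing the variable names from the hunk above (`embeds_path`, `token_path`, `downloaded_embedding_folder`, `embeds_url`, `placeholder_token_string`); it is an illustration only, not code from this commit.

    import shutil
    import urllib.request

    # Copy the downloaded embedding files (replaces the `!cp` shell magics).
    shutil.copy(embeds_path, downloaded_embedding_folder)
    shutil.copy(token_path, downloaded_embedding_folder)

    # Download a user-supplied embedding file (replaces `!wget`).
    urllib.request.urlretrieve(embeds_url, f"{downloaded_embedding_folder}/learned_embeds.bin")

    # Plain logging (replaces IPython's display(Markdown(...))).
    print(f"The placeholder token for your concept is {placeholder_token_string}")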