Spaces: Running on Zero
alfredplpl committed
Commit • 84d7485
1 Parent(s): 85c5886
Update app.py

app.py CHANGED
@@ -27,57 +27,9 @@ pipe=pipe.to("cuda")
 pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
-
-
-
-embeddings_dict = {}
-with safe_open("unaestheticXLv31.safetensors", framework="pt") as f:
-    for k in f.keys():
-        embeddings_dict[k] = f.get_tensor(k)
-pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-for i in range(len(embeddings_dict["clip_l"])):
-    token = f"sksd{chr(token_num)}"
-    token_num+=1
-    unaestheticXLv31 += token
-    pipe.tokenizer.add_tokens(token)
-    token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
-    pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
-
-unaestheticXLv1=""
-embeddings_dict = {}
-with safe_open("unaestheticXLv1.safetensors", framework="pt") as f:
-    for k in f.keys():
-        embeddings_dict[k] = f.get_tensor(k)
-pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-for i in range(len(embeddings_dict["clip_l"])):
-    token = f"sksd{chr(token_num)}"
-    token_num+=1
-    unaestheticXLv1 += token
-    pipe.tokenizer.add_tokens(token)
-    token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
-    pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
-
-unaestheticXLv13=""
-embeddings_dict = {}
-with safe_open("unaestheticXLv13.safetensors", framework="pt") as f:
-    for k in f.keys():
-        embeddings_dict[k] = f.get_tensor(k)
-
-pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-pipe.text_encoder_2.resize_token_embeddings(len(pipe.tokenizer),pad_to_multiple_of=128)
-for i in range(len(embeddings_dict["clip_l"])):
-    token = f"sksd{chr(token_num)}"
-    unaestheticXLv13 += token
-    token_num+=1
-    pipe.tokenizer.add_tokens(token)
-    token_id = pipe.tokenizer.convert_tokens_to_ids(token)
-    pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
-    pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
-
+state_dict = load_file("unaestheticXLv31.safetensors")
+pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
+pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
 
 compel = Compel(tokenizer=[pipe.tokenizer, pipe.tokenizer_2] ,
                 text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
@@ -114,7 +66,7 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,disable_auto_prompt_correctio
 
     if(prompt=="" and neg_prompt==""):
         prompt="1girl++, smile--, brown bob+++ hair, brown eyes, sunflowers, sky, transparent++"
-        neg_prompt=f"
+        neg_prompt=f"unaestheticXLv31---, photo, deformed, realism, disfigured, low contrast, bad hand"
         return prompt, neg_prompt
 
     splited_prompt=prompt.replace(","," ").replace("_"," ").replace("+"," ").split(" ")
@@ -123,14 +75,14 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,disable_auto_prompt_correctio
     for word in human_words:
         if( word in splited_prompt):
             prompt=f"anime artwork, anime style, {prompt}"
-            neg_prompt=f"
+            neg_prompt=f"unaestheticXLv31---,{neg_prompt}, photo, deformed, realism, disfigured, low contrast, bad hand"
             return prompt, neg_prompt
 
     animal_words=["cat","dog","bird","pigeon","rabbit","bunny","horse"]
     for word in animal_words:
         if( word in splited_prompt):
             prompt=f"anime style, a {prompt}, 4k, detailed"
-            neg_prompt=f"{neg_prompt},
+            neg_prompt=f"{neg_prompt},unaestheticXLv31---"
             return prompt, neg_prompt
 
     background_words=["mount fuji","mt. fuji","building", "buildings", "tokyo", "kyoto", "nara", "shibuya", "shinjuku"]
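
Note: the lines added in this commit follow the diffusers pattern for loading an SDXL textual-inversion embedding whose safetensors file ships separate "clip_l" and "clip_g" tensors (the same keys the removed manual loop read). Below is a minimal standalone sketch of that loading path; the base checkpoint id, dtype, sample prompts, and output filename are placeholders, not taken from this Space's app.py.

import torch
from diffusers import StableDiffusionXLPipeline
from safetensors.torch import load_file

# Placeholder base model; app.py loads its own checkpoint before this point.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# The embedding file stores one tensor per SDXL text encoder:
# "clip_l" pairs with text_encoder/tokenizer, "clip_g" with text_encoder_2/tokenizer_2.
# Both are registered under the same trigger token, so a single word in a prompt
# activates the learned embedding in both encoders.
state_dict = load_file("unaestheticXLv31.safetensors")
pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31",
                            text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31",
                            text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)

# The trigger token can now be used like any other word, here in the negative prompt.
image = pipe(
    prompt="anime artwork, 1girl, sunflowers, sky",
    negative_prompt="unaestheticXLv31, photo, deformed, low contrast",
    num_inference_steps=28,
).images[0]
image.save("sample.png")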
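
The "+"/"-" suffixes in the prompts above ("brown bob+++", "unaestheticXLv31---") are Compel weighting syntax rather than literal prompt text: app.py builds a Compel instance over both SDXL encoders (visible as context in the first hunk) and passes the resulting embeddings to the pipeline. A hedged sketch of that pattern follows, assuming the usual Compel SDXL configuration; the returned_embeddings_type and requires_pooled arguments are not visible in this diff and are an assumption.

from compel import Compel, ReturnedEmbeddingsType

# Both SDXL tokenizers/encoders; only the second encoder supplies pooled output.
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)

# "+" upweights a term and "-" downweights it; repeating the sign compounds the factor,
# so "unaestheticXLv31---" pushes strongly away from that embedding in the negative prompt.
prompt = "1girl++, smile--, brown bob+++ hair, brown eyes, sunflowers, sky, transparent++"
neg_prompt = "unaestheticXLv31---, photo, deformed, realism, disfigured, low contrast, bad hand"

cond, pooled = compel(prompt)
neg_cond, neg_pooled = compel(neg_prompt)
image = pipe(
    prompt_embeds=cond, pooled_prompt_embeds=pooled,
    negative_prompt_embeds=neg_cond, negative_pooled_prompt_embeds=neg_pooled,
).images[0]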