Commit 9fe4df1 • alfredplpl committed • Parent(s): 17899c8

Update app.py
app.py
CHANGED
@@ -33,22 +33,43 @@ pipe_i2i=pipe_i2i_merged.to("cuda")
 pipe.enable_xformers_memory_efficient_attention()
 pipe_i2i.enable_xformers_memory_efficient_attention()

-
-embeddings_dict=torch.load(
+embeddings_path=hf_hub_download(repo_id=model_id, filename="nfixer.pt", use_auth_token=token)
+embeddings_dict=torch.load(embeddings_path)
 print(embeddings_dict)
 if "string_to_param" in embeddings_dict:
     embeddings = next(iter(embeddings_dict['string_to_param'].values()))
-
+    nfixer = ""
     for i, emb in enumerate(embeddings):
         token = f"_s{i+1}"
-
+        nfixer += token
         pipe.tokenizer.add_tokens(token)
         pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
         token_id = pipe.tokenizer.convert_tokens_to_ids(token)
         pipe.text_encoder.get_input_embeddings().weight.data[token_id] = emb
 else:
-
-    embeddings = embeddings_dict[
+    nfixer = list(embeddings_dict.keys())[0]
+    embeddings = embeddings_dict[nfixer].to(pipe.text_encoder.get_input_embeddings().weight.dtype)
+    pipe.tokenizer.add_tokens(placeholder_token)
+    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
+    placeholder_token_id = pipe.tokenizer.convert_tokens_to_ids(placeholder_token)
+    pipe.text_encoder.get_input_embeddings().weight.data[placeholder_token_id] = embeddings
+
+embeddings_path=hf_hub_download(repo_id=model_id, filename="embellish2.pt", use_auth_token=token)
+embeddings_dict=torch.load(embeddings_path)
+print(embeddings_dict)
+if "string_to_param" in embeddings_dict:
+    embeddings = next(iter(embeddings_dict['string_to_param'].values()))
+    embellish2 = ""
+    for i, emb in enumerate(embeddings):
+        token = f"_s{i+1}"
+        embellish2 += token
+        pipe.tokenizer.add_tokens(token)
+        pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
+        token_id = pipe.tokenizer.convert_tokens_to_ids(token)
+        pipe.text_encoder.get_input_embeddings().weight.data[token_id] = emb
+else:
+    embellish2 = list(embeddings_dict.keys())[0]
+    embeddings = embeddings_dict[embellish2].to(pipe.text_encoder.get_input_embeddings().weight.dtype)
     pipe.tokenizer.add_tokens(placeholder_token)
     pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
     placeholder_token_id = pipe.tokenizer.convert_tokens_to_ids(placeholder_token)
@@ -98,8 +119,8 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,disable_auto_prompt_correctio
         return prompt, neg_prompt

     if(prompt=="" and neg_prompt==""):
-        prompt="anime, masterpiece, portrait, a girl with flowers, good pupil, 4k, detailed"
-        neg_prompt=f"{
+        prompt=f"{embellish2},anime, masterpiece, portrait, a girl with flowers, good pupil, 4k, detailed"
+        neg_prompt=f"{nfixer},(((deformed))), blurry, ((((bad anatomy)))), bad pupil, disfigured, poorly drawn face, mutation, mutated, (extra limb), (ugly), (poorly drawn hands), bad hands, fused fingers, messy drawing, broken legs censor, low quality, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2), ((bad eyes)), ui, error, missing fingers, fused fingers, one hand with more than 5 fingers, one hand with less than 5 fingers, one hand with more than 5 digit, one hand with less than 5 digit, extra digit, fewer digits, fused digit, missing digit, bad digit, liquid digit, long body, uncoordinated body, unnatural body, lowres, jpeg artifacts, 3d, cg, text"
         return prompt, neg_prompt

     splited_prompt=prompt.replace(","," ").replace("_"," ").split(" ")
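The same load-and-register pattern now appears twice in app.py, once for nfixer.pt and once for embellish2.pt: download the textual-inversion .pt file, add one pseudo-token per embedding vector, resize the text encoder's token embedding matrix, and copy the learned vectors into the new rows. The sketch below folds that pattern into a single helper. It is not part of the commit; the name load_ti_embedding and its signature are hypothetical, and in its single-vector branch it registers the key stored in the file, whereas app.py registers a separately defined placeholder_token.

# Minimal sketch (not part of the commit): the pattern app.py repeats for
# nfixer.pt and embellish2.pt, folded into one hypothetical helper.
import torch
from huggingface_hub import hf_hub_download

def load_ti_embedding(pipe, repo_id, filename, hf_token=None):
    # Download the textual-inversion .pt file from the model repo.
    path = hf_hub_download(repo_id=repo_id, filename=filename, use_auth_token=hf_token)
    embeddings_dict = torch.load(path, map_location="cpu")

    if "string_to_param" in embeddings_dict:
        # Multi-vector embedding: register one pseudo-token (_s1, _s2, ...) per row.
        embeddings = next(iter(embeddings_dict["string_to_param"].values()))
        trigger = ""
        for i, emb in enumerate(embeddings):
            tok = f"_s{i+1}"
            trigger += tok
            pipe.tokenizer.add_tokens(tok)
            pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
            tok_id = pipe.tokenizer.convert_tokens_to_ids(tok)
            pipe.text_encoder.get_input_embeddings().weight.data[tok_id] = emb
        return trigger

    # Single-vector embedding: the dict maps one placeholder string to one tensor.
    # (app.py registers a separately defined placeholder_token here; this sketch
    # registers the key stored in the file so it stays self-contained.)
    trigger = list(embeddings_dict.keys())[0]
    emb = embeddings_dict[trigger].to(pipe.text_encoder.get_input_embeddings().weight.dtype)
    pipe.tokenizer.add_tokens(trigger)
    pipe.text_encoder.resize_token_embeddings(len(pipe.tokenizer))
    tok_id = pipe.tokenizer.convert_tokens_to_ids(trigger)
    pipe.text_encoder.get_input_embeddings().weight.data[tok_id] = emb
    return trigger

With such a helper, the setup above would reduce to roughly nfixer = load_ti_embedding(pipe, model_id, "nfixer.pt", hf_token=token) and embellish2 = load_ti_embedding(pipe, model_id, "embellish2.pt", hf_token=token), with the returned trigger strings prepended to the default prompt and negative prompt as in the second hunk. Recent diffusers releases also provide pipe.load_textual_inversion(), which covers much of this pattern.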