alfredplpl committed
Commit b9f307f · Parent: 6a9173e

Update app.py

Files changed (1): app.py (+15 -8)
app.py CHANGED
@@ -82,6 +82,9 @@ for i in range(len(embeddings_dict["clip_l"])):
     pipe.text_encoder.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_l"][i]
     pipe.text_encoder_2.get_input_embeddings().weight.data[token_id] = embeddings_dict["clip_g"][i]
 
+
+compel = Compel(tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2], text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2], returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, requires_pooled=[False, True])
+
 def error_str(error, title="Error"):
     return f"""#### {title}
             {error}""" if error else ""
@@ -124,7 +127,7 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,disable_auto_prompt_correctio
 
     if(prompt=="" and neg_prompt==""):
         prompt="1girl, sunflowers, brown bob hair, brown eyes, sky"
-        neg_prompt=f"{unaestheticXLv31}, {unaestheticXLv31}, {unaestheticXLv13}, {unaestheticXLv13}, {unaestheticXLv1}"
+        neg_prompt=f"({unaestheticXLv31})+++, ({unaestheticXLv13})+++, ({unaestheticXLv1})+"
         return prompt, neg_prompt
 
     splited_prompt=prompt.replace(","," ").replace("_"," ").split(" ")
@@ -153,14 +156,18 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,disable_auto_prompt_correctio
         return prompt,neg_prompt
 
 def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
+    conditioning, pooled = compel([prompt, negative_prompt])
+
     result = pipe(
-        prompt,
-        negative_prompt = neg_prompt,
-        num_inference_steps = int(steps),
-        guidance_scale = guidance,
-        width = width,
-        height = height,
-        generator = generator)
+        prompt_embeds=conditioning[0:1],
+        pooled_prompt_embeds=pooled[0:1],
+        negative_prompt_embeds=conditioning[1:2],
+        negative_pooled_prompt_embeds=pooled[1:2],
+        num_inference_steps = int(steps),
+        guidance_scale = guidance,
+        width = width,
+        height = height,
+        generator = generator)
 
     return result.images[0]
 
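
For reference, a minimal, self-contained sketch of the flow this commit sets up, assuming the SDXL pipeline object is the `pipe` defined earlier in app.py (the committed Compel constructor refers to it as `pipeline`) and that `Compel` and `ReturnedEmbeddingsType` come from the compel package; this illustrates the intended usage rather than reproducing the app's exact code:

# Sketch only: compel-weighted prompts for an SDXL diffusers pipeline.
from compel import Compel, ReturnedEmbeddingsType

compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    # SDXL conditioning uses the penultimate, non-normalized hidden states.
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],  # only the second text encoder provides pooled embeddings
)

def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
    # Encode positive and negative prompts as one batch so both sequences end up
    # the same length; weighting syntax such as "(low quality)+++" is applied here.
    conditioning, pooled = compel([prompt, neg_prompt])
    result = pipe(
        prompt_embeds=conditioning[0:1],
        pooled_prompt_embeds=pooled[0:1],
        negative_prompt_embeds=conditioning[1:2],
        negative_pooled_prompt_embeds=pooled[1:2],
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
    )
    return result.images[0]

The negative-prompt change follows the same idea: instead of repeating {unaestheticXLv31} and {unaestheticXLv13} in the string to strengthen them, compel's attention-weighting syntax is used, where each trailing + scales the parenthesized text by roughly 1.1 (so (...)+++ is about a 1.33x emphasis) and - would de-emphasize it.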