zseid committed on
Commit 62c99c3
1 Parent(s): f832251

set lower precision

Files changed (1)
  1. app.py +12 -9
app.py CHANGED
@@ -28,7 +28,8 @@ results = dict()
 results[STABLE_MODELS[0]] = process_analysis(os.path.join(EVAL_DATA_DIRECTORY,'raw',"stable_diffusion_raw_processed.csv"))
 results[STABLE_MODELS[1]] = process_analysis(os.path.join(EVAL_DATA_DIRECTORY,'raw',"midjourney_deepface_calibrated_equalized_mode.csv"))
 
-scheduler = PNDMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler", prediction_type="v_prediction")
+scheduler = PNDMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler", prediction_type="v_prediction",revision="fp16",
+                                          torch_dtype=torch.float16)
 pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", scheduler=scheduler)
 pipe = pipe.to(device)
 
@@ -100,7 +101,7 @@ def adj_example(adj):
     prompt = f"a {adj} person photorealistic"
     return example_analysis(prompt)
 def example_analysis(prompt):
-    pil_img = pipe(prompt,num_inference_steps=29).images[0]
+    pil_img = pipe(prompt,num_inference_steps=20).images[0]
     # pil_img = Image.open('./this-is-fine.0.jpg')
     df = process_image_pil(pil_img,prompt)
     rgb_tup = (128,128,128)
@@ -181,18 +182,20 @@ if __name__=='__main__':
 
                 inp = gr.Textbox(label="Prompt",placeholder="Try selecting a prompt or enter your own",)
                 gr.Markdown("If the above component is stuck, try switching between the dropdown options.")
-                sent = gr.Dropdown(LOOKS,label="Trait")
-                with gr.Accordion("Details",open=False):
+                with gr.Tab("Trait/Sentiment"):
+                    sent = gr.Dropdown(LOOKS,label="Trait")
                     gr.Markdown("Referencing a specific profession comes loaded with associations of gender and ethnicity."
                                 " Text to image models provide an opportunity to explicitly specify an underrepresented group, but first we must understand our default behavior. "
                                 "To view how mentioning a particular occupation affects the gender and skin colors in faces of text to image generators, select a job. Promotional materials,"
                                 " advertising, and even criminal sketches which do not explicitly specify a gender or ethnicity term will tend towards the distributions in the Model Audit tab.")
-                occs = gr.Dropdown(JOBS,label="Occupation")
-                with gr.Accordion("Details",open=False):
+                    sent.change(fn=lambda k: f"a {k} person photorealistic", inputs=sent, outputs=inp)
+                with gr.Tab("Occupation/Income"):
+                    occs = gr.Dropdown(JOBS,label="Occupation")
                     gr.Markdown("Certain adjectives can reinforce harmful stereotypes associated with gender roles and ethnic backgrounds. "
                                 "Text to image models provide an opportunity to understand how prompting a particular human expression could be triggering, "
                                 "or why an uncommon combination might provide important examples to minorities without default representation."
                                 "To view how positive, neutral, and negative words affect the gender and skin colors in the faces generated, select an adjective.")
+                    occs.change(fn=lambda k: f"a {k} photorealistic", inputs=occs, outputs=inp, )
                 btn = gr.Button("Generate and Analyze")
             with gr.Column():
 
@@ -202,8 +205,8 @@ if __name__=='__main__':
                 inten = gr.ColorPicker(label="Grayscale intensity")
                 img = gr.Image(label="Stable Diffusion v1.5")
                 sentscore = gr.Text(label="VADER sentiment score",interactive=False)
-        sent.change(fn=lambda k: f"a {k} person photorealistic", inputs=sent, outputs=inp)
-        occs.change(fn=lambda k: f"a {k} photorealistic", inputs=occs, outputs=inp,)
+
+
         btn.click(fn=example_analysis,inputs=inp,outputs=[img,gender,skin,inten,sentscore])
         # inp.submit(fn=example_analysis, outputs=[img,gender,skin,inten])
 
@@ -229,4 +232,4 @@ if __name__=='__main__':
     # ["Occupational Bias", "Adjectival Bias", "Prompt analysis",'FACIA model auditing'],
    # title = "Text-to-Image Bias Explorer"
     # ).launch()
-    demo.launch()
+    demo.launch(enable_queue=True,)
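
For reference, a minimal sketch of the more common way to request half precision with diffusers: torch_dtype is usually passed to DiffusionPipeline.from_pretrained, which owns the model weights, rather than to the scheduler, whose from_pretrained only loads configuration. This is an illustrative alternative, not the commit's exact code; the prompt below is a placeholder built from the app's template, and the CPU fallback is an added assumption.

    import torch
    from diffusers import DiffusionPipeline, PNDMScheduler

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # The scheduler carries no weights, so it loads the same way in either precision.
    scheduler = PNDMScheduler.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
    )

    # Half precision is requested on the pipeline; fall back to fp32 on CPU.
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        scheduler=scheduler,
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    )
    pipe = pipe.to(device)

    # Fewer steps (20, as in this commit) trade some quality for lower latency.
    image = pipe("a kind person photorealistic", num_inference_steps=20).images[0]
    image.save("example.png")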
 
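As a usage note, the layout change above reduces to the following self-contained sketch: each dropdown and its change handler now sit inside their own gr.Tab, and both write into the shared prompt textbox. The option lists and the echoing click handler are placeholders (the real app uses LOOKS, JOBS, and example_analysis), and enable_queue=True mirrors this commit's Gradio 3.x call; newer Gradio releases queue via demo.queue().launch() instead.

    import gradio as gr

    # Placeholder option lists; the app builds LOOKS and JOBS elsewhere.
    LOOKS = ["kind", "ordinary", "clumsy"]
    JOBS = ["doctor", "teacher", "janitor"]

    with gr.Blocks() as demo:
        inp = gr.Textbox(label="Prompt")
        with gr.Tab("Trait/Sentiment"):
            sent = gr.Dropdown(LOOKS, label="Trait")
            # Selecting a trait rewrites the shared prompt box.
            sent.change(fn=lambda k: f"a {k} person photorealistic", inputs=sent, outputs=inp)
        with gr.Tab("Occupation/Income"):
            occs = gr.Dropdown(JOBS, label="Occupation")
            occs.change(fn=lambda k: f"a {k} photorealistic", inputs=occs, outputs=inp)
        out = gr.Textbox(label="Result")
        btn = gr.Button("Generate and Analyze")
        # Stand-in for example_analysis: echo the prompt instead of running the model.
        btn.click(fn=lambda p: p, inputs=inp, outputs=out)

    # Queue long-running generations instead of letting requests time out.
    demo.launch(enable_queue=True)

Keeping each change handler next to its dropdown, instead of the detached handlers this commit removes, keeps each tab self-contained.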