yjernite committed on
Commit
ed3c11f
1 Parent(s): 07e6d39

moved galleries

Files changed (1)
  1. app.py +115 -48
app.py CHANGED
@@ -64,13 +64,14 @@ def show_id_images(cl_id_1, cl_id_2, cl_id_3):
  with gr.Blocks() as demo:
      gr.Markdown(
          """
- ## Stable Bias: Analyzing Societal Representations in Diffusion Models
- """
- )
- gr.HTML(
  """
- <p style="margin-bottom: 10px; font-size: 94%">This is the demo page for the "Stable Bias" paper, which aims to explore and quantify social biases in text-to-image systems. <br> This work was done by <a href='https://huggingface.co/sasha' style='text-decoration: underline;' target='_blank'> Alexandra Sasha Luccioni (Hugging Face) </a>, <a href='https://huggingface.co/cakiki' style='text-decoration: underline;' target='_blank'> Christopher Akiki (ScaDS.AI, Leipzig University)</a>, <a href='https://huggingface.co/meg' style='text-decoration: underline;' target='_blank'> Margaret Mitchell (Hugging Face) </a> and <a href='https://huggingface.co/yjernite' style='text-decoration: underline;' target='_blank'> Yacine Jernite (Hugging Face) </a> .</p>
- """
  )
  gr.HTML(
  """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image systems and may depict offensive stereotypes or contain explicit content.</span>"""
@@ -84,12 +85,20 @@ with gr.Blocks() as demo:
  ).style(grid=[1, 6], height="auto")
  gr.HTML(
  """
- <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image models are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We compare three such models: <b> Stable Diffusion v.1.4, Stable Diffusion v.2. </b>, and <b> Dall-E 2 </b>, prompting them to produce images of different <i> professions </i> and <i> identity characteristics </i>. <br> You explore our findings in the sections below: </p>
  """
  )

  gr.Markdown(
  """
  ### How do Diffusion Models Represent Identity?

  One of the goals of our study was to look at the ways in which pictures generated by text-to-image models depict different notions of gender and ethnicity.
@@ -249,7 +258,7 @@ with gr.Blocks() as demo:

  gr.Markdown(
  """
- ### Quantifying Social Biases in Image Generations of Professions

  Machine Learning models encode and amplify biases that are represented in the data that they are trained on -
  this can include, for instance, stereotypes around the appearances of members of different professions.
@@ -260,66 +269,124 @@ with gr.Blocks() as demo:
  Read more about our findings in the accordion below or directly via the [Diffusion Cluster Explorer](https://hf.co/spaces/society-ethics/DiffusionClustering) tool.
  """
  )
- with gr.Accordion("Quantifying Social Biases in Image Generations of Professions", open=False):
  gr.Markdown(
  """
  <br/>
  We also explore the correlations between the professions that are used in our prompts and the different identity clusters that we identified.

- Using both the [Diffusion Cluster Explorer](https://hf.co/spaces/society-ethics/DiffusionClustering)
- and the [Identity Representation Demo](https://hf.co/spaces/society-ethics/DiffusionFaceClustering),
  we can see which clusters are most correlated with each profession and what identities are in these clusters.
  """
  )
  with gr.Row():
- with gr.Column():
- gr.HTML(
  """
- <p style="margin-bottom: 14px; font-size: 100%"> Using the <b><a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer</a></b>, we can see that the top cluster for the CEO and director professions is <b> Cluster 4</b>: </p> """
- )
- with gr.Column():
- ceo_img = gr.Image(
- Image.open("images/bias/ceo_dir.png"),
- label="CEO Image",
- show_label=False,
- )
-
- with gr.Row():
- with gr.Column():
- gr.HTML(
  """
- <p style="margin-bottom: 14px; font-size: 100%"> Going back to the <b><a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a></b>, we can see that the most represented gender term is <i> man </i> (56% of the cluster) and <i> White </i> (29% of the cluster). <br> This is consistent with common stereotypes regarding people in positions of power, who are predominantly male, according to the US Labor Bureau Statistics. </p> """
  )
- with gr.Column():
- cluster4 = gr.Image(
- Image.open("images/bias/Cluster4.png"),
- label="Cluster 4 Image",
  show_label=False,
  )
- with gr.Row():
- with gr.Column():
- gr.HTML(
- """
- <p style="margin-bottom: 14px; font-size: 100%"> If we look at the cluster representation of professions such as social assistant and social worker, we can observe that the former is best represented by <b>Cluster 2</b>, whereas the latter has a more uniform representation across multiple clusters: </p> """
- )
- with gr.Column():
- social_img = gr.Image(
- Image.open("images/bias/social.png"),
- label="social image",
- show_label=False,
  )
  with gr.Row():
  with gr.Column(scale=1):
- gr.HTML(
- """
- <p style="margin-bottom: 14px; font-size: 100%"> Cluster 2 is best represented by the gender term is <i> woman </i> (81%) as well as <i> Latinx </i> (19%) <br> This gender proportion is exactly the same as the one provided by the United States Labor Bureau (which you can see in the table above), with 81% of social assistants identifying as women. </p> """
  )
- with gr.Column(scale=2):
- cluster4 = gr.Image(
- Image.open("images/bias/Cluster2.png"),
- label="Cluster 2 Image",
  show_label=False,
  )

  gr.Markdown(
  """
 
@@ -64,13 +64,14 @@ def show_id_images(cl_id_1, cl_id_2, cl_id_3):
  with gr.Blocks() as demo:
      gr.Markdown(
          """
+ ## Stable Bias: Analyzing Societal Representations in Diffusion Models
+
+ This is the demo page for the "Stable Bias" paper, which aims to explore and quantify social biases in text-to-image systems.
+ <br/>
+ This work was done by [Alexandra Sasha Luccioni (Hugging Face)](https://huggingface.co/sasha),
+ [Christopher Akiki (ScaDS.AI, Leipzig University)](https://huggingface.co/cakiki), [Margaret Mitchell (Hugging Face)](https://huggingface.co/meg),
+ and [Yacine Jernite (Hugging Face)](https://huggingface.co/yjernite).
  """
  )
  gr.HTML(
  """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image systems and may depict offensive stereotypes or contain explicit content.</span>"""

@@ -84,12 +85,20 @@ with gr.Blocks() as demo:
  ).style(grid=[1, 6], height="auto")
  gr.HTML(
  """
+ <p style="margin-bottom: 14px; font-size: 100%"> </p>
  """
  )

  gr.Markdown(
  """
+ As AI-enabled Text-to-Image models are becoming increasingly used, characterizing the social biases they exhibit
+ is a necessary first step to lowering their risk of discriminatory outcomes.
+ <br/>
+ We compare three such models: **Stable Diffusion v.1.4, Stable Diffusion v.2.**, and **Dall-E 2**,
+ prompting them to produce images of different *professions* and *identity characteristics*.
+ <br/>
+ You can explore our findings in the sections below:
+
  ### How do Diffusion Models Represent Identity?

  One of the goals of our study was to look at the ways in which pictures generated by text-to-image models depict different notions of gender and ethnicity.

@@ -249,7 +258,7 @@ with gr.Blocks() as demo:

  gr.Markdown(
  """
+ ### Quantifying Social Biases in Image Generations: Professions

  Machine Learning models encode and amplify biases that are represented in the data that they are trained on -
  this can include, for instance, stereotypes around the appearances of members of different professions.

@@ -260,66 +269,124 @@ with gr.Blocks() as demo:
  Read more about our findings in the accordion below or directly via the [Diffusion Cluster Explorer](https://hf.co/spaces/society-ethics/DiffusionClustering) tool.
  """
  )
+ with gr.Accordion("Quantifying Social Biases in Image Generations: Professions", open=False):
  gr.Markdown(
  """
  <br/>
  We also explore the correlations between the professions that are used in our prompts and the different identity clusters that we identified.

+ Using the [Profession Bias Tool](https://hf.co/spaces/society-ethics/DiffusionClustering)
  we can see which clusters are most correlated with each profession and what identities are in these clusters.
  """
  )
+ impath = "images/bias"
  with gr.Row():
+ with gr.Column(scale=1):
+ gr.Markdown(
  """
+ #### [Diversity and Representation across Models](https://hf.co/spaces/society-ethics/DiffusionClustering "you can cycle through screenshots of the tool in use on the right, or go straight to the interactive demo")
+
+ Using the **[Profession Bias Tool](https://hf.co/spaces/society-ethics/DiffusionClustering)**,
+ we can see that the top cluster for the CEO and director professions is **Cluster 4**:
+
+ We can see that the most represented gender term is *man* (56% of the cluster) and the most represented ethnicity term is *White* (29% of the cluster).
+ This is consistent with common stereotypes regarding people in positions of power, who are predominantly male, according to the US Bureau of Labor Statistics.
  """
  )
+ with gr.Column(scale=1):
+ bias_cl_id_1 = gr.Dropdown(
+ choices=[
+ "Results table: all models",
+ "Results table: Stable Diffusion v1.4",
+ "Results table: Stable Diffusion v2,",
302
+ "Results table: Stable Diffusion Dall-E 2",
+ "Comparison histogram: all professions",
+ ],
+ value="Results table: all models",
  show_label=False,
  )
+ bias_screenshot_1 = gr.Image(
+ value=os.path.join(impath, "cluster_assign_24_all.png"),
+ label="Screenshot of the Profession Bias Tool | Results table: all models",
 
 
 
 
 
 
 
 
311
  )
312
  with gr.Row():
313
  with gr.Column(scale=1):
314
+ bias_cl_id_2 = gr.Dropdown(
315
+ choices=[
316
+ "Results table: mental health professions, all models",
317
+ "Comparison histogram: psychologist",
318
+ "Comparison histogram: social worker",
319
+ "Comparison histogram: social assistant",
320
+ ],
321
+ value="Results table: mental health professions, all models",
322
+ show_label=False,
323
+ )
324
+ bias_screenshot_2 = gr.Image(
325
+ value=os.path.join(impath, "cluster_assign_mental_health_24_all.png"),
326
+ label="Screenshot of the Profession Bias Tool | Results table: mental health professions, all models",
327
  )
328
+ mental_helth_examlpars = gr.Gallery(
329
+ [
330
+ (Image.open(os.path.join(impath, im)), name)
331
+ for im, name in [
332
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
333
+ ("social_assistant_2_of_24.png", "Generated images of 'social assistant' assigned to cluster 2 of 24"),
334
+ ("social_assistant_5_of_24.png", "Generated images of 'social assistant' assigned to cluster 5 of 24"),
335
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
336
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
337
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
338
+ ("social_worker_2_of_24.png", "Generated images of 'social worker' assigned to cluster 2 of 24"),
339
+ ("social_worker_5_of_24.png", "Generated images of 'social worker' assigned to cluster 5 of 24"),
340
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
341
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
342
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
343
+ ("psychologist_2_of_24.png", "Generated images of 'psychologists' assigned to cluster 2 of 24"),
344
+ ("psychologist_5_of_24.png", "Generated images of 'psychologists' assigned to cluster 5 of 24"),
345
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
346
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
347
+ ]
348
+ ],
349
+ label="Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2).",
350
  show_label=False,
351
+ ).style(grid=[3, 5], height="auto")
352
+ with gr.Column():
353
+ gr.Markdown(
354
+ """
355
+ #### [Focused Comparison: Mental Health Professions](https://hf.co/spaces/society-ethics/DiffusionClustering "you can cycle through screenshots of the tool in use on the left and example images below, or go straight to the interactive demo")
356
+
357
+ If we look at the cluster representation of professions such as social assistant and social worker,
358
+ we can observe that the former is best represented by **Cluster 2**, whereas the latter has a more uniform representation across multiple clusters:
359
+
360
+ Cluster 2 is best represented by the gender term is *woman* (81%) as well as *Latinx* (19%).
361
+ This gender proportion is exactly the same as the one provided by the United States Labor Bureau (which you can see in the table above), with 81% of social assistants identifying as women.
362
+ """
363
  )
364
+ if False:
365
+ # with gr.Row():
366
+ mental_helth_examlpars = gr.Gallery(
367
+ [
368
+ (Image.open(os.path.join(impath, im)), name)
369
+ for im, name in [
370
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
371
+ ("social_assistant_2_of_24.png", "Generated images of 'social assistant' assigned to cluster 2 of 24"),
372
+ ("social_assistant_5_of_24.png", "Generated images of 'social assistant' assigned to cluster 5 of 24"),
373
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
374
+ ("social_assistant_0_of_24.png", "Generated images of 'social assistant' assigned to cluster 0 of 24"),
375
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
376
+ ("social_worker_2_of_24.png", "Generated images of 'social worker' assigned to cluster 2 of 24"),
377
+ ("social_worker_5_of_24.png", "Generated images of 'social worker' assigned to cluster 5 of 24"),
378
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
379
+ ("social_worker_0_of_24.png", "Generated images of 'social worker' assigned to cluster 0 of 24"),
380
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
381
+ ("psychologist_2_of_24.png", "Generated images of 'psychologists' assigned to cluster 2 of 24"),
382
+ ("psychologist_5_of_24.png", "Generated images of 'psychologists' assigned to cluster 5 of 24"),
383
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
384
+ ("psychologist_0_of_24.png", "Generated images of 'psychologists' assigned to cluster 0 of 24"),
385
+ ]
386
+ ],
387
+ label="Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2).",
388
+ show_label=False,
389
+ ).style(grid=[3, 5], height="auto")
390
 
391
  gr.Markdown(
392
  """