sasha HF staff committed on
Commit 7c66f57
1 Parent(s): 503ccc0

Update app.py

Files changed (1)
  1. app.py +49 -42
app.py CHANGED
@@ -17,71 +17,78 @@ with gr.Blocks() as demo:
  ''')
  examples_path= "images/examples"
  examples_gallery = gr.Gallery(get_images(examples_path),
- label="Example images", show_label=False, elem_id="gallery").style(grid=[1,6], height="auto")
  gr.HTML('''
- <p style="margin-bottom: 10px; font-size: 94%"> Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2). </p>
- ''')
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image systems are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We propose a new method for exploring and quantifying social biases in these kinds of systems by directly comparing collections of generated images designed to showcase a system’s variation across social attributes — gender and ethnicity — and target attributes for bias evaluation — professions and gender-coded adjectives. <br> We compare three models: Stable Diffusion v.1.4, Stable Diffusion v.2., and Dall-E 2 and find <b> clear evidence of ethnicity and gender biases </b>. <br> You can explore these findings in the collapsed sections below, which present our findings: </p>
  ''')

  with gr.Accordion("Identity group results (ethnicity and gender)", open=False):
  gr.HTML('''
  <p style="margin-bottom: 14px; font-size: 100%"> One of the approaches that we adopted in our work is hierarchical clustering of the images generated by the text-to-image systems in response to prompts that include identity terms with regards to ethnicity and gender. <br> We computed 3 different numbers of clusters (12, 24 and 48) and created an <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a> that allows for the exploration of the different clusters and their contents. </p>
  ''')
  with gr.Row():
- impath = "images/identities"
- identity_gallery = gr.Gallery([os.path.join(impath,im) for im in os.listdir(impath)],
- label="Identity cluster images", show_label=False, elem_id="gallery"
- ).style(grid=3, height="auto")
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> TO DO: talk about what we see above. <br> Continue exploring the demo on your own to uncover other patterns! </p>
  ''')

  with gr.Accordion("Bias Exploration", open=False):
  gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> We queried our 3 systems with prompts that included names of professions, and one of our goals was to explore the social biases of these models. <br> Since artificial depictions of fictive
- humans have no inherent gender or ethnicity nor do they belong to socially-constructed groups, we pursued our analysis <b> without </b> ascribing gender and ethnicity categories to the images generated. <b> We do this by calculating the correlations between the professions and the different identity clusters that we identified. <br> Using both the <a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer </a> and the <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a>, we can see which clusters are most correlated with each profession and what identities are in these clusters.</p>
  ''')
  with gr.Row():
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> Using the <a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer </a>, we can see that the top cluster for the CEO and director professions is Cluster 4: </p> ''')
- ceo_img = gr.Image(Image.open("images/bias/ceo_dir.png"), label = "CEO Image", show_label=False)

  with gr.Row():
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> Going back to the <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a>, we can see that the most represented gender term is man (56% of the cluster) and White (29% of the cluster). </p> ''')
- cluster4 = gr.Image(Image.open("images/bias/Cluster4.png"), label = "Cluster 4 Image", show_label=False)
  with gr.Row():
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> If we look at the cluster representation of professions such as social assistant and social worker, we can observe that the former is best represented by Cluster 2, whereas the latter has a more uniform representation across multiple clusters: </p> ''')
- social_img = gr.Image(Image.open("images/bias/social.png"), label = "social image", show_label=False)
-
  with gr.Row():
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> Cluster 2 is best represented by the gender term is woman (81%) as well as Latinx (19%). </p> ''')
  cluster4 = gr.Image(Image.open("images/bias/Cluster2.png"), label = "Cluster 2 Image", show_label=False)
- with gr.Row():
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> TO DO: talk about what we see above. <br> Continue exploring the demo on your own to uncover other patterns! </p>''')

  with gr.Accordion("Comparing model generations", open=False):
  gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> One of the goals of our study was allowing users to compare model generations across professions in an open-ended way, uncovering patterns and trends on their own. This is why we created the <a href='https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer' style='text-decoration: underline;' target='_blank'> Diffusion Bias Explorer </a> and the <a href='https://huggingface.co/spaces/society-ethics/Average_diffusion_faces' style='text-decoration: underline;' target='_blank'> Average Diffusion Faces </a> tools. <br> We show some of their functionalities below: </p> ''')
- with gr.Row():
- with gr.Column():
- impath = "images/biasexplorer"
- biasexplorer_gallery = gr.Gallery([os.path.join(impath,im) for im in os.listdir(impath)],
- label="Bias explorer images", show_label=False, elem_id="gallery").style(grid=2, height="auto")
- with gr.Column():
  gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> Comparing generations both between two models and within a single model can help uncover trends and patterns that are hard to measure using quantitative approaches. </p>''')
  with gr.Row():
- impath = "images/averagefaces"
- average_gallery = gr.Gallery([os.path.join(impath,im) for im in os.listdir(impath)],
- label="Average Face images", show_label=False, elem_id="gallery").style(grid=3, height="auto")
- gr.HTML('''
- <p style="margin-bottom: 14px; font-size: 100%"> Looking at the average faces for a given profession across multiple models can help see the dominant characteristics of that profession, as well as how much variation there is (based on how fuzzy the image is). </p>''')

  with gr.Accordion("Exploring the color space of generated images", open=False):
  gr.HTML('''
@@ -105,6 +112,6 @@ humans have no inherent gender or ethnicity nor do they belong to socially-const
  <a href='https://huggingface.co/spaces/tti-bias/identities-colorfulness-knn' style='text-decoration: underline;' target='_blank'> Colorfulness Profession Explorer </a> <br>
  <a href='https://huggingface.co/spaces/tti-bias/professions-colorfulness-knn' style='text-decoration: underline;' target='_blank'> Colorfulness Identities Explorer </a> <br> </p>
  ''')
- gr.Interface.load("spaces/society-ethics/DiffusionBiasExplorer")

  demo.launch(debug=True)
 
  ''')
  examples_path= "images/examples"
  examples_gallery = gr.Gallery(get_images(examples_path),
+ label="Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2).", show_label=True, elem_id="gallery").style(grid=[1,6], height="auto")
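The gallery above is populated by a get_images helper that is used in app.py but not visible in this diff. A minimal sketch of what such a helper might look like (the extension filter and sorting are assumptions, not taken from the commit):

import os

def get_images(path, extensions=(".png", ".jpg", ".jpeg")):
    # Return sorted image file paths from a folder, in a form gr.Gallery accepts.
    return sorted(
        os.path.join(path, fname)
        for fname in os.listdir(path)
        if fname.lower().endswith(extensions)
    )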
  gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image models are becoming increasingly widely used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We compare three such models: <b> Stable Diffusion v.1.4, Stable Diffusion v.2. </b>, and <b> Dall-E 2 </b>, prompting them to produce images of different <i> professions </i> and <i> identity characteristics </i>. <br> Since artificial depictions of fictive humans have no inherent gender or ethnicity nor do they belong to socially-constructed groups, we pursued our analysis <i> without </i> ascribing gender and ethnicity categories to the images generated, and still found clear evidence of ethnicity and gender biases. You can explore these findings in the sections below: </p>
  ''')
+
+

  with gr.Accordion("Identity group results (ethnicity and gender)", open=False):
  gr.HTML('''
  <p style="margin-bottom: 14px; font-size: 100%"> One of the approaches that we adopted in our work is hierarchical clustering of the images generated by the text-to-image systems in response to prompts that include identity terms with regards to ethnicity and gender. <br> We computed 3 different numbers of clusters (12, 24 and 48) and created an <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a> that allows for the exploration of the different clusters and their contents. </p>
  ''')
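As a rough illustration of the clustering step described in the paragraph above, assuming per-image embeddings are already available (the embedding model and linkage settings are assumptions, not taken from this commit), the 12/24/48 groupings could be obtained by cutting a single hierarchical tree at three levels:

import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage

def cluster_identity_images(embeddings: np.ndarray, n_clusters=(12, 24, 48)):
    # embeddings: one row per generated image (e.g. face or CLIP embeddings).
    # Build one hierarchical tree, then cut it at each requested cluster count,
    # mirroring the 12 / 24 / 48 clusterings mentioned above.
    tree = linkage(embeddings, method="ward")
    return {k: fcluster(tree, t=k, criterion="maxclust") for k in n_clusters}

# cluster_identity_images(embs)[24] returns a cluster id (1..24) for each image.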
  with gr.Row():
+ with gr.Column(scale=2):
+ impath = "images/identities"
+ identity_gallery = gr.Gallery([os.path.join(impath,im) for im in os.listdir(impath)],
+ label="Identity cluster images", show_label=False, elem_id="gallery"
+ ).style(grid=3, height="auto")
+ with gr.Column(scale=1):
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> You can see that the models reflect many societal biases -- for instance representing Native Americans wearing traditional headdresses, non-binary people with stereotypical haircuts and glasses, and East Asian men with features that amplify ethnic stereotypes. <br> This is problematic because it reinforces existing cultural stereotypes and fails to represent the diversity that is present in all identity groups.</p>
  ''')
+

  with gr.Accordion("Bias Exploration", open=False):
  gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> We also explore the correlations between the professions that we used in our prompts and the different identity clusters that we identified. <br> Using both the <a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer </a> and the <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a>, we can see which clusters are most correlated with each profession and what identities are in these clusters.</p>
  ''')
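To make the correlation between professions and identity clusters concrete, here is a small pandas sketch of the kind of tabulation the two linked demos expose; the metadata table and column names are illustrative, not taken from the repository:

import pandas as pd

# Hypothetical per-image metadata: one row per generated image.
df = pd.DataFrame({
    "profession": ["CEO", "CEO", "social worker", "social assistant"],
    "cluster": [4, 4, 7, 2],
    "gender_term": ["man", "man", "woman", "woman"],
})

# Share of each identity cluster among a profession's images;
# the "top cluster" for a profession is the column with the largest share.
prof_by_cluster = pd.crosstab(df["profession"], df["cluster"], normalize="index")

# Composition of each cluster in terms of the gender terms used in the prompts,
# which is how statements like "56% of Cluster 4 is man" are read off.
cluster_by_gender = pd.crosstab(df["cluster"], df["gender_term"], normalize="index")

print(prof_by_cluster.round(2))
print(cluster_by_gender.round(2))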
  with gr.Row():
+ with gr.Column():
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> Using the <b><a href='https://huggingface.co/spaces/society-ethics/DiffusionClustering' style='text-decoration: underline;' target='_blank'> Diffusion Cluster Explorer</a></b>, we can see that the top cluster for the CEO and director professions is <b> Cluster 4</b>: </p> ''')
+ with gr.Column():
+ ceo_img = gr.Image(Image.open("images/bias/ceo_dir.png"), label = "CEO Image", show_label=False)

  with gr.Row():
+ with gr.Column():
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> Going back to the <b><a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a></b>, we can see that the most represented gender term in this cluster is <i> man </i> (56% of the cluster) and the most represented ethnicity term is <i> White </i> (29% of the cluster). <br> This is consistent with common stereotypes regarding people in positions of power, who are predominantly male, according to the US Bureau of Labor Statistics. </p> ''')
+ with gr.Column():
+ cluster4 = gr.Image(Image.open("images/bias/Cluster4.png"), label = "Cluster 4 Image", show_label=False)
  with gr.Row():
+ with gr.Column():
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> If we look at the cluster representation of professions such as social assistant and social worker, we can observe that the former is best represented by <b>Cluster 2</b>, whereas the latter has a more uniform representation across multiple clusters: </p> ''')
+ with gr.Column():
+ social_img = gr.Image(Image.open("images/bias/social.png"), label = "social image", show_label=False)
  with gr.Row():
+ with gr.Column(scale=1):
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> Cluster 2 is best represented by the gender term <i> woman </i> (81%) as well as <i> Latinx </i> (19%). <br> This gender proportion is exactly the same as the one provided by the United States Bureau of Labor Statistics (which you can see in the table above), with 81% of social assistants identifying as women. </p> ''')
+ with gr.Column(scale=2):
  cluster4 = gr.Image(Image.open("images/bias/Cluster2.png"), label = "Cluster 2 Image", show_label=False)

  with gr.Accordion("Comparing model generations", open=False):
  gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> One of the goals of our study was to allow users to compare model generations across professions in an open-ended way, uncovering patterns and trends on their own. This is why we created the <a href='https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer' style='text-decoration: underline;' target='_blank'> Diffusion Bias Explorer </a> and the <a href='https://huggingface.co/spaces/society-ethics/Average_diffusion_faces' style='text-decoration: underline;' target='_blank'> Average Diffusion Faces </a> tools. We show some of their functionalities below: </p> ''')
+ with gr.Row():
+ with gr.Column(scale=2):
+ explorerpath = "images/biasexplorer"
+ biasexplorer_gallery = gr.Gallery(get_images(explorerpath),
+ label="Bias explorer images", show_label=False, elem_id="gallery").style(grid=[2])
+ with gr.Column(scale=1):
  gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> Comparing generations both between two models and within a single model can help uncover trends and patterns that are hard to measure using quantitative approaches. <br> For instance, we can observe that both Dall-E 2 and Stable Diffusion 2 represent both <i> CEOs </i> and <i> nurses </i> as homogeneous groups with distinct characteristics, such as ties and scrubs (which makes sense given the results of our clustering, shown above). <br> We can also see that the images of <i> waitresses </i> generated by Dall-E 2 and Stable Diffusion v.1.4. have different characteristics, both in terms of their clothing and their appearance. <br> It's also possible to see harder-to-describe phenomena, like the fact that portraits of <i> painters </i> often look like paintings themselves. <br> We encourage you to use the <a href='https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer' style='text-decoration: underline;' target='_blank'> Diffusion Bias Explorer </a> tool to explore these phenomena further! </p>''')
  with gr.Row():
+ with gr.Column(scale=2):
+ averagepath = "images/averagefaces"
+ average_gallery = gr.Gallery(get_images(averagepath),
+ label="Average Face images", show_label=False, elem_id="gallery").style(grid=[2])
+ with gr.Column(scale=1):
+ gr.HTML('''
+ <p style="margin-bottom: 14px; font-size: 100%"> Looking at the average faces for a given profession across multiple models can help reveal the dominant characteristics of that profession, as well as how much variation there is (based on how fuzzy the image is). <br> In the images shown here, we can see that representations of these professions differ significantly across the three models, while sharing common characteristics, e.g. <i> postal workers </i> all wear caps. <br> Also, the average faces of <i> hairdressers </i> seem fuzzier than those of the other professions, indicating a higher diversity among the generations. </p>''')
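The average faces shown in this gallery can be thought of as a per-pixel mean over one profession's generations for one model: the more the generations vary, the blurrier the mean. A minimal sketch, assuming the images for one profession/model pair sit together in a folder (the folder layout and image size are assumptions):

import os
import numpy as np
from PIL import Image

def average_face(folder, size=(512, 512)):
    # Per-pixel mean over all images in `folder`; a blurrier result means
    # the individual generations vary more.
    files = [os.path.join(folder, f) for f in os.listdir(folder)
             if f.lower().endswith((".png", ".jpg", ".jpeg"))]
    stack = np.stack([
        np.asarray(Image.open(f).convert("RGB").resize(size), dtype=np.float32)
        for f in files
    ])
    return Image.fromarray(stack.mean(axis=0).astype(np.uint8))

# e.g. average_face("images/generations/sd14/postal_worker").save("avg.png")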

  with gr.Accordion("Exploring the color space of generated images", open=False):
  gr.HTML('''
@@ -105,6 +112,6 @@ humans have no inherent gender or ethnicity nor do they belong to socially-const
  <a href='https://huggingface.co/spaces/tti-bias/identities-colorfulness-knn' style='text-decoration: underline;' target='_blank'> Colorfulness Profession Explorer </a> <br>
  <a href='https://huggingface.co/spaces/tti-bias/professions-colorfulness-knn' style='text-decoration: underline;' target='_blank'> Colorfulness Identities Explorer </a> <br> </p>
  ''')
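The two Spaces linked above rank generations by colorfulness. A common choice for such a score is the Hasler-Süsstrunk colorfulness metric, sketched below; whether the linked explorers use exactly this formula is an assumption:

import numpy as np
from PIL import Image

def colorfulness(path):
    # Hasler & Süsstrunk (2003) colorfulness score: higher means more
    # saturated, more varied color; grayscale images score near zero.
    rgb = np.asarray(Image.open(path).convert("RGB"), dtype=np.float32)
    rg = rgb[..., 0] - rgb[..., 1]
    yb = 0.5 * (rgb[..., 0] + rgb[..., 1]) - rgb[..., 2]
    std_root = np.sqrt(rg.std() ** 2 + yb.std() ** 2)
    mean_root = np.sqrt(rg.mean() ** 2 + yb.mean() ** 2)
    return float(std_root + 0.3 * mean_root)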
+ # gr.Interface.load("spaces/society-ethics/DiffusionBiasExplorer")

  demo.launch(debug=True)