sashavor committed
Commit 452f3b2 (1 parent: 792a0ad)

adding first section

Files changed (1)
  1. app.py +7 -3
app.py CHANGED
@@ -19,14 +19,18 @@ with gr.Blocks() as demo:
     examples_gallery = gr.Gallery(get_images(examples_path),
                                   label="Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2).", show_label=True, elem_id="gallery").style(grid=[1,6], height="auto")
     gr.HTML('''
-    <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image models are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We compare three such models: <b> Stable Diffusion v.1.4, Stable Diffusion v.2. </b>, and <b> Dall-E 2 </b>, prompting them to produce images of different <i> professions </i> and <i> identity characteristics </i>. <br> Since artificial depictions of fictive humans have no inherent gender or ethnicity nor do they belong to socially-constructed groups, we pursued our analysis <i> without </i> ascribing gender and ethnicity categories to the images generated, still finding clear evidence of ethnicity and gender biases. You can explore these findings in the sections below: </p>
+    <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image models are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We compare three such models: <b> Stable Diffusion v.1.4, Stable Diffusion v.2. </b>, and <b> Dall-E 2 </b>, prompting them to produce images of different <i> professions </i> and <i> identity characteristics </i>. <br> You can explore our findings in the sections below: </p>
     ''')

     gr.Markdown("""
     ### Looking at Identity Groups
     """)
+
+    gr.Markdown("""
+    One of the goals of our study was to look at the ways in which different identity groups (ethnicity and gender) are represented by text-to-image models. Since artificial depictions of fictive humans have no inherent gender or ethnicity nor do they belong to socially-constructed groups, we pursued our analysis <i> without </i> ascribing identity categories to the images generated, using unsupervised techniques such as clustering. We find clear evidence of ethnicity and gender biases, which you can see by expanding the accordion below or directly via the [Identity Representation Demo](https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering).
+    """)

-    with gr.Accordion("Looking at Identity Groups(ethnicity and gender)", open=False):
+    with gr.Accordion("Looking at Identity Groups", open=False):
         gr.HTML('''
         <p style="margin-bottom: 14px; font-size: 100%"> One of the approaches that we adopted in our work is hierarchical clustering of the images generated by the text-to-image systems in response to prompts that include identity terms with regards to ethnicity and gender. <br> We computed 3 different numbers of clusters (12, 24 and 48) and created an <a href='https://huggingface.co/spaces/society-ethics/DiffusionFaceClustering' style='text-decoration: underline;' target='_blank'> Identity Representation Demo </a> that allows for the exploration of the different clusters and their contents. </p>
         ''')
@@ -38,7 +42,7 @@ with gr.Blocks() as demo:
     ).style(grid=3, height="auto")
     with gr.Column(scale=1):
         gr.HTML('''
-        <p style="margin-bottom: 14px; font-size: 100%"> You can see that the models reflect many societal biases -- for instance representing Native Americans wearing traditional headdresses, non-binary people with stereotypical haircuts and glasses, and East Asian men with features that amplify ethnic stereotypes. <br> This is problematic because it reinforces existing cultural stereotypes and fails to represent the diversity that is present in all identity groups.</p>
+        <p style="margin-bottom: 14px; font-size: 100%"> You can see that the models reflect many societal biases -- for instance representing Native Americans wearing traditional headdresses, non-binary people with stereotypical haircuts and glasses, and East Asian men with features that amplify ethnic stereotypes. <br> <br> This is problematic because it reinforces existing cultural stereotypes and fails to represent the diversity that is present in all identity groups.</p>
         ''')
     gr.Markdown("""
     ### Exploring Biases
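
For context on the hierarchical clustering described in the added text, the sketch below shows one way generated images could be grouped without ascribing identity labels. It is a minimal sketch, assuming CLIP image embeddings and scikit-learn's AgglomerativeClustering, with a hypothetical `generated_faces/` folder of model outputs and the three cluster counts mentioned above (12, 24, 48); the actual Identity Representation Demo may be implemented differently.

```python
# Minimal sketch: hierarchical clustering of generated images without identity labels.
# Assumptions (not from the commit): CLIP image embeddings + scikit-learn clustering.
from pathlib import Path

import torch
from PIL import Image
from sklearn.cluster import AgglomerativeClustering
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")


def embed_images(image_dir: str) -> torch.Tensor:
    """Return one CLIP image embedding per image found in `image_dir`."""
    images = [Image.open(p) for p in sorted(Path(image_dir).glob("*.png"))]
    inputs = processor(images=images, return_tensors="pt")
    with torch.no_grad():
        return model.get_image_features(**inputs)


# Hypothetical folder of text-to-image outputs for identity-term prompts.
embeddings = embed_images("generated_faces").numpy()

# Cluster at the three granularities mentioned in the text.
for n_clusters in (12, 24, 48):
    labels = AgglomerativeClustering(n_clusters=n_clusters).fit_predict(embeddings)
    sizes = [int((labels == k).sum()) for k in range(n_clusters)]
    print(f"{n_clusters} clusters -> sizes: {sizes}")
```

The clusters themselves carry no identity annotations; any interpretation (e.g. which prompts dominate a cluster) happens afterwards, which is the point of analyzing the images without ascribing gender or ethnicity categories.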