sashavor committed • Commit 0ebf1f9
Parent(s): 3bbd44f
making changes, as per Meg's ideas
app.py CHANGED
@@ -18,9 +18,11 @@ with gr.Blocks() as demo:
     examples_path= "images/examples"
     examples_gallery = gr.Gallery(get_images(examples_path),
                                   label="Example images", show_label=False, elem_id="gallery").style(grid=[1,6], height="auto")
-
     gr.HTML('''
-    <p style="margin-bottom: …
+    <p style="margin-bottom: 10px; font-size: 94%"> Example images generated by three text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2). </p>
+    ''')
+    gr.HTML('''
+    <p style="margin-bottom: 14px; font-size: 100%"> As AI-enabled Text-to-Image systems are becoming increasingly used, characterizing the social biases they exhibit is a necessary first step to lowering their risk of discriminatory outcomes. <br> We propose a new method for exploring and quantifying social biases in these kinds of systems by directly comparing collections of generated images designed to showcase a system’s variation across social attributes — gender and ethnicity — and target attributes for bias evaluation — professions and gender-coded adjectives. <br> We compare three models: Stable Diffusion v.1.4, Stable Diffusion v.2, and Dall-E 2, and find <b>clear evidence of ethnicity and gender biases</b>. <br> You can explore these findings in the collapsed sections below: </p>
     ''')
 
     with gr.Accordion("Identity group results (ethnicity and gender)", open=False):
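
For readers following the change: the commit splits one gr.HTML block into two, so the gallery caption sits directly under the gallery while the longer introductory text gets its own block. Below is a minimal, self-contained sketch of how the resulting layout fits together. It targets the Gradio 3.x API the app already uses (`.style(grid=..., height=...)`); the `get_images` helper and the placeholder accordion body are assumptions for illustration, not the app's actual implementation.

```python
# Minimal sketch of the layout after this commit (Gradio 3.x API, as used in app.py).
import glob

import gradio as gr


def get_images(path):
    # Hypothetical helper: collect example image paths from a directory.
    return sorted(glob.glob(f"{path}/*.png"))


with gr.Blocks() as demo:
    examples_path = "images/examples"
    # Gallery of example images, laid out in a single row of six tiles.
    examples_gallery = gr.Gallery(
        get_images(examples_path),
        label="Example images", show_label=False, elem_id="gallery",
    ).style(grid=[1, 6], height="auto")

    # Caption for the gallery, kept in its own gr.HTML block.
    gr.HTML('<p style="font-size: 94%">Example images generated by three '
            'text-to-image models (Dall-E 2, Stable Diffusion v1.4 and v.2).</p>')

    # Introductory text for the bias analysis, now a separate gr.HTML block.
    gr.HTML('<p style="font-size: 100%">We compare three models and find '
            '<b>clear evidence of ethnicity and gender biases</b>.</p>')

    # Collapsed section holding the identity-group results.
    with gr.Accordion("Identity group results (ethnicity and gender)", open=False):
        gr.Markdown("Placeholder for the identity group charts.")

if __name__ == "__main__":
    demo.launch()
```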