yjernite committed on
Commit
ff03d77
1 Parent(s): 79449fa

intro text

Files changed (1)
  1. app.py +30 -12
app.py CHANGED
@@ -9,7 +9,29 @@ import operator
 pd.options.plotting.backend = "plotly"
 
 
-TITLE = "Diffusion Professions Cluster Explorer"
+TITLE = "Identity Biases in Diffusion Models: Professions"
+
+_INTRO = """
+# Identity Biases in Diffusion Models: Professions
+
+Explore profession-level social biases in the data from [DiffusionBiasExplorer](https://hf.co/spaces/society-ethics/DiffusionBiasExplorer)!
+This demo leverages the gender and ethnicity representation clusters described in the [companion app](https://hf.co/spaces/society-ethics/DiffusionFaceClustering)
+to analyze social trends in machine-generated visual representations of professions.
+The **Professions Overview** tab lets you compare the distribution over
+[identity clusters](https://hf.co/spaces/society-ethics/DiffusionFaceClustering "Identity clusters identify visual features in the systems' output space correlated with variation of gender and ethnicity in input prompts.")
+across professions for Stable Diffusion and Dalle-2 systems (or aggregated for `All Models`).
+The **Professions Focus** tab provides more details for each of the individual professions, including direct system comparisons and examples of profession images for each cluster.
+This work was done in the scope of the [Stable Bias Project](https://hf.co/spaces/society-ethics/StableBias).
+As you use this demo, please share findings and comments [in the discussions tab](https://hf.co/spaces/society-ethics/DiffusionClustering/discussions)!
+"""
+
+_ = """
+For example, you can use this tool to investigate:
+- How does each model's representation of professions correlate with the gender ratios reported by the [U.S. Bureau of Labor
+Statistics](https://www.bls.gov/cps/cpsaat11.htm "The reported percentage of women in each profession in the US is indicated in the `Labor Women` column in the Professions Overview tab.")?
+Are social trends reflected, or are they exaggerated?
+- Which professions have the starkest differences in how different models represent them?
+"""
 
 professions_dset = load_from_disk("professions")
 professions_df = professions_dset.to_pandas()
@@ -92,7 +114,7 @@ def make_profession_plot(num_clusters, prof_name):
     )
     df = pd.DataFrame.from_dict(pre_pandas)
     prof_plot = df.plot(kind="bar", barmode="group")
-    cl_summary_text = f"Profession ``{prof_name}'':\n"
+    cl_summary_text = f"Profession '{prof_name}':\n"
     for cl_id, _ in sorted_cl_scores:
         cl_summary_text += f"- {cluster_summaries_by_size[str(num_clusters)][int(cl_id)].replace(' gender terms', '').replace('; ethnicity terms:', ',')} \n"
     return (
@@ -206,20 +228,16 @@ def show_examplars(num_clusters, prof_name, cl_id, confidence_threshold=0.6):
 
 
 with gr.Blocks(title=TITLE) as demo:
-    gr.Markdown(
-        """
-        # Identity Biases in Diffusion Models: Professions
-
-        This tool helps you explore the different clusters that we discovered in the images generated by 3 text-to-image models: Dall-E 2, Stable Diffusion v.1.4 and v.2.
-        This work was done in the scope of the [Stable Bias Project](https://huggingface.co/spaces/society-ethics/StableBias).
-        """
-    )
+    gr.Markdown(_INTRO)
     gr.HTML(
         """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image systems and may depict offensive stereotypes or contain explicit content.</span>"""
     )
     with gr.Tab("Professions Overview"):
         gr.Markdown(
-            "Select one or more professions and models from the dropdowns on the left to see which clusters are most representative for this combination. Try choosing different numbers of clusters to see if the results change, and then go to the 'Profession Focus' tab to go more in-depth into these results."
+            """
+            Select one or more professions and models from the dropdowns on the left to see which clusters are most representative for this combination.
+            Try choosing different numbers of clusters to see if the results change, and then go to the 'Profession Focus' tab to go more in-depth into these results.
+            """
        )
        with gr.Row():
            with gr.Column(scale=1):
@@ -277,7 +295,7 @@ with gr.Blocks(title=TITLE) as demo:
        with gr.Row():
            with gr.Column():
                gr.Markdown(
-                    "Select profession to visualize and see which clusters and identity groups are most represented in the profession, as well as some examples of generated images below."
+                    "Select a profession to visualize and see which clusters and identity groups are most represented in the profession, as well as some examples of generated images below."
                )
                profession_choice_focus = gr.Dropdown(
                    choices=professions,
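
For reference, the layout pattern this commit moves to keeps the intro copy in a module-level `_INTRO` string and renders it with a single `gr.Markdown` call at the top of the `gr.Blocks` layout. The sketch below is a minimal, self-contained illustration of that pattern, not the Space's actual `app.py`: the `professions` list, tab text, and dropdown label are placeholder assumptions standing in for the data loaded from disk in the real app.

```python
# Minimal sketch of the gr.Blocks + module-level intro pattern used in this commit.
import gradio as gr

TITLE = "Identity Biases in Diffusion Models: Professions"

_INTRO = """
# Identity Biases in Diffusion Models: Professions

Explore profession-level social biases in machine-generated images of professions.
"""

# Hypothetical stand-in for the professions dataset loaded from disk in app.py.
professions = ["CEO", "nurse", "teacher"]

with gr.Blocks(title=TITLE) as demo:
    gr.Markdown(_INTRO)  # one call renders the whole intro, replacing the old inline string
    with gr.Tab("Professions Overview"):
        gr.Markdown("Select one or more professions to compare their cluster distributions.")
        with gr.Row():
            with gr.Column(scale=1):
                profession_choice = gr.Dropdown(
                    choices=professions, value=professions[0], label="Profession"
                )

if __name__ == "__main__":
    demo.launch()
```

Keeping the intro in one module-level string makes the copy easy to edit (or reuse, as the `_INTRO` constant is here) without touching the layout code inside the `gr.Blocks` context.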