Avijit Ghosh committed
Commit 0d25da0
1 Parent(s): 87e696f

added code to all tabs

Files changed (2):
  1. app.py +35 -13
  2. configs/ieat.yaml +1 -2
app.py CHANGED

@@ -129,7 +129,7 @@ def showmodal(evt: gr.SelectData):
     screenshots = itemdic['Screenshots']
     if isinstance(screenshots, list):
         if len(screenshots) > 0:
-            gallery = gr.Gallery(screenshots, visible=True, height=500, object_fit="scale-down", interactive=False, show_share_button=False)
+            gallery = gr.Gallery(screenshots, visible=True, height=450, object_fit="scale-down", interactive=False, show_share_button=False)
 
     return [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery]
 
@@ -184,16 +184,16 @@ The following categories are high-level, non-exhaustive, and present a synthesis
                 tagsmd = gr.Markdown(visible=False)
                 abstractmd = gr.Markdown(visible=False)
                 gr.Markdown("""## Construct Validity<br>
-                ### How well it measures the concept it was designed to evaluate""", visible=True)
+                ##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True)
                 whatisbeingmd = gr.Markdown(visible=False)
                 methodmd = gr.Markdown(visible=False)
                 considerationsmd = gr.Markdown(visible=False)
                 gr.Markdown("""## Resources<br>
-                ### What you need to do this evaluation""", visible=True)
+                ##### <em>What you need to do this evaluation</em>""", visible=True)
                 modelsmd = gr.Markdown(visible=False)
                 datasetmd = gr.Markdown(visible=False)
                 gr.Markdown("""## Results<br>
-                ### Available evaluation results""", visible=True)
+                ##### <em>Available evaluation results</em>""", visible=True)
                 metricsmd = gr.Markdown(visible=False)
                 gallery = gr.Gallery(visible=False)
             table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery])
@@ -202,10 +202,10 @@ The following categories are high-level, non-exhaustive, and present a synthesis
 
         with gr.TabItem("Cultural Values/Sensitive Content"):
             fulltable = globaldf[globaldf['Group'] == 'CulturalEvals']
-            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Considerations', 'Link']]
+            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']]
 
-            gr.Markdown("""Cultural values are specific to groups and sensitive content is normative. Sensitive topics also vary by culture and can include hate speech. What is considered a sensitive topic, such as egregious violence or adult sexual content, can vary widely by viewpoint. Due to norms differing by culture, region, and language, there is no standard for what constitutes sensitive content.
-            Distinct cultural values present a challenge for deploying models into a global sphere, as what may be appropriate in one culture may be unsafe in others. Generative AI systems cannot be neutral or objective, nor can they encompass truly universal values. There is no “view from nowhere”; in quantifying anything, a particular frame of reference is imposed.
+            gr.Markdown("""
+            Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereotypes related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks.
             """)
             with gr.Row():
                 modality_filter = gr.CheckboxGroup(["Text", "Image", "Audio", "Video"],
@@ -230,13 +230,24 @@ The following categories are high-level, non-exhaustive, and present a synthesis
             with Modal(visible=False) as modal:
                 titlemd = gr.Markdown(visible=False)
                 authormd = gr.Markdown(visible=False)
+                affiliationmd = gr.Markdown(visible=False)
                 tagsmd = gr.Markdown(visible=False)
                 abstractmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Construct Validity<br>
+                ##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True)
+                whatisbeingmd = gr.Markdown(visible=False)
+                methodmd = gr.Markdown(visible=False)
+                considerationsmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Resources<br>
+                ##### <em>What you need to do this evaluation</em>""", visible=True)
                 modelsmd = gr.Markdown(visible=False)
                 datasetmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Results<br>
+                ##### <em>Available evaluation results</em>""", visible=True)
+                metricsmd = gr.Markdown(visible=False)
                 gallery = gr.Gallery(visible=False)
-            table_filtered.select(showmodal, None, [modal, titlemd, authormd, tagsmd, abstractmd, modelsmd, datasetmd, gallery])
-
+            table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery])
+
 
 
         # with gr.TabItem("Disparate Performance"):
@@ -245,10 +256,10 @@ The following categories are high-level, non-exhaustive, and present a synthesis
 
         with gr.TabItem("Privacy/Data Protection"):
             fulltable = globaldf[globaldf['Group'] == 'PrivacyEvals']
-            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Considerations', 'Link']]
+            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']]
 
-            gr.Markdown("""Cultural values are specific to groups and sensitive content is normative. Sensitive topics also vary by culture and can include hate speech. What is considered a sensitive topic, such as egregious violence or adult sexual content, can vary widely by viewpoint. Due to norms differing by culture, region, and language, there is no standard for what constitutes sensitive content.
-            Distinct cultural values present a challenge for deploying models into a global sphere, as what may be appropriate in one culture may be unsafe in others. Generative AI systems cannot be neutral or objective, nor can they encompass truly universal values. There is no “view from nowhere”; in quantifying anything, a particular frame of reference is imposed.
+            gr.Markdown("""
+            Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereotypes related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks.
             """)
             with gr.Row():
                 modality_filter = gr.CheckboxGroup(["Text", "Image", "Audio", "Video"],
@@ -273,12 +284,23 @@ The following categories are high-level, non-exhaustive, and present a synthesis
             with Modal(visible=False) as modal:
                 titlemd = gr.Markdown(visible=False)
                 authormd = gr.Markdown(visible=False)
+                affiliationmd = gr.Markdown(visible=False)
                 tagsmd = gr.Markdown(visible=False)
                 abstractmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Construct Validity<br>
+                ##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True)
+                whatisbeingmd = gr.Markdown(visible=False)
+                methodmd = gr.Markdown(visible=False)
+                considerationsmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Resources<br>
+                ##### <em>What you need to do this evaluation</em>""", visible=True)
                 modelsmd = gr.Markdown(visible=False)
                 datasetmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Results<br>
+                ##### <em>Available evaluation results</em>""", visible=True)
+                metricsmd = gr.Markdown(visible=False)
                 gallery = gr.Gallery(visible=False)
-            table_filtered.select(showmodal, None, [modal, titlemd, authormd, tagsmd, abstractmd, modelsmd, datasetmd, gallery])
+            table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery])
 
         # with gr.TabItem("Financial Costs"):
         #     with gr.Row():
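After this commit, the three tabs build identical modal layouts, and each wires `table_filtered.select` to the same `showmodal` handler with the same thirteen outputs. A minimal sketch of a factory that would keep the copies in sync is below; `make_detail_modal` is a hypothetical helper, not part of this commit, and it assumes `Modal` is the `gradio_modal` component the app appears to use.

```python
# Hypothetical refactor (not in this commit): build the repeated modal
# layout once so the three copies cannot drift apart. `make_detail_modal`
# is an illustrative name; `Modal` is assumed to come from gradio_modal.
import gradio as gr
from gradio_modal import Modal

def make_detail_modal():
    """Create the hidden detail modal and return [modal, *components]
    in the exact order that showmodal() emits its updates."""
    with Modal(visible=False) as modal:
        titlemd = gr.Markdown(visible=False)
        authormd = gr.Markdown(visible=False)
        affiliationmd = gr.Markdown(visible=False)
        tagsmd = gr.Markdown(visible=False)
        abstractmd = gr.Markdown(visible=False)
        gr.Markdown("""## Construct Validity<br>
        ##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True)
        whatisbeingmd = gr.Markdown(visible=False)
        methodmd = gr.Markdown(visible=False)
        considerationsmd = gr.Markdown(visible=False)
        gr.Markdown("""## Resources<br>
        ##### <em>What you need to do this evaluation</em>""", visible=True)
        modelsmd = gr.Markdown(visible=False)
        datasetmd = gr.Markdown(visible=False)
        gr.Markdown("""## Results<br>
        ##### <em>Available evaluation results</em>""", visible=True)
        metricsmd = gr.Markdown(visible=False)
        gallery = gr.Gallery(visible=False)
    return [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd,
            whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd,
            metricsmd, gallery]

# Inside each tab, the wiring then collapses to:
#     outputs = make_detail_modal()
#     table_filtered.select(showmodal, None, outputs)
```

Note that `showmodal` receives the clicked row through its `gr.SelectData` event argument rather than through an input component, which is why the inputs argument to `select` is `None` in every tab.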
configs/ieat.yaml CHANGED

@@ -7,8 +7,7 @@ Datasets: .nan
 Group: BiasEvals
 Hashtags: .nan
 Link: Image Representations Learned With Unsupervised Pre-Training Contain Human-like
-  Biases | Proceedings of the 2021 ACM Conference on Fairness, Accountability, and
-  Transparency
+  Biases
 Modality: Image
 Screenshots: []
 Suggested Evaluation: Image Embedding Association Test (iEAT)
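One note on the `Link` edit: YAML folds the indented continuation lines of a plain scalar into the preceding line with single spaces, so the old value parsed as the paper title plus its venue, and the new value parses as the bare title. A quick check, assuming PyYAML is available:

```python
# Sanity check of the folded scalar (assumes PyYAML is installed):
# continuation lines of a plain scalar join with single spaces.
import yaml

snippet = """
Link: Image Representations Learned With Unsupervised Pre-Training Contain Human-like
  Biases
"""
print(yaml.safe_load(snippet)["Link"])
# Image Representations Learned With Unsupervised Pre-Training Contain Human-like Biases
```

With the previous three-line value, the venue string (`| Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency`) was folded into the link text as well, which is presumably why it was trimmed to the title alone.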