Avijit Ghosh committed
Commit 15af994
Parent: a8356e2

More prettification

Files changed (1):
  1. app.py (+16, -10)
app.py CHANGED
@@ -72,10 +72,11 @@ def showmodal(evt: gr.SelectData):
     authormd = gr.Markdown("",visible=False)
     tagsmd = gr.Markdown("",visible=False)
     abstractmd = gr.Markdown("",visible=False)
+    considerationsmd = gr.Markdown("",visible=False)
     modelsmd = gr.Markdown("",visible=False)
     datasetmd = gr.Markdown("",visible=False)
     gallery = gr.Gallery([],visible=False)
-    if evt.index[1] == 5:
+    if evt.index[1] == 4:
         modal = Modal(visible=True)
         itemdic = metadata_dict[evt.value]
@@ -98,6 +99,9 @@ def showmodal(evt: gr.SelectData):
 
         if pd.notnull(itemdic['Abstract']):
             abstractmd = gr.Markdown(itemdic['Abstract'],visible=True)
+
+        if pd.notnull(itemdic['Considerations']):
+            considerationsmd = gr.Markdown('<strong>Considerations: </strong>'+ itemdic['Considerations'],visible=True)
 
         if pd.notnull(itemdic['Datasets']):
             datasetmd = gr.Markdown('#### [Dataset]('+itemdic['Datasets']+')',visible=True)
@@ -107,7 +111,7 @@ def showmodal(evt: gr.SelectData):
         if len(screenshots) > 0:
             gallery = gr.Gallery(screenshots, visible=True, height=500, object_fit="scale-down", interactive=False, show_share_button=False)
 
-    return [modal, titlemd, authormd, tagsmd, abstractmd, modelsmd, datasetmd, gallery]
+    return [modal, titlemd, authormd, tagsmd, abstractmd, considerationsmd, modelsmd, datasetmd, gallery]
 
 with gr.Blocks(title = "Social Impact Measurement V2", css=custom_css, theme=gr.themes.Base()) as demo: #theme=gr.themes.Soft(),
     # create tabs for the app, moving the current table to one titled "rewardbench" and the benchmark_text to a tab called "About"
@@ -128,7 +132,7 @@ The following categories are high-level, non-exhaustive, and present a synthesis
     with gr.Tabs(elem_classes="tab-buttons") as tabs1:
         with gr.TabItem("Bias/Stereolevels"):
             fulltable = globaldf[globaldf['Group'] == 'BiasEvals']
-            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Considerations', 'Link']]
+            fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']]
 
             gr.Markdown("""
 Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereolevels related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks.
@@ -158,16 +162,18 @@ The following categories are high-level, non-exhaustive, and present a synthesis
                 authormd = gr.Markdown(visible=False)
                 tagsmd = gr.Markdown(visible=False)
                 abstractmd = gr.Markdown(visible=False)
-                gr.Markdown("## Construct Validity", visible=True)
-                gr.Markdown("### What it is evaluating", visible=True)
-                gr.Markdown('## Resources', visible=True)
-                gr.Markdown('### What you need to do this evaluation', visible=True)
+                gr.Markdown("""## Construct Validity<br>
+                ### How well it measures the concept it was designed to evaluate""", visible=True)
+                # gr.Markdown("### What it is evaluating", visible=True)
+                considerationsmd = gr.Markdown(visible=False)
+                gr.Markdown("""## Resources<br>
+                ### What you need to do this evaluation""", visible=True)
                 modelsmd = gr.Markdown(visible=False)
                 datasetmd = gr.Markdown(visible=False)
-                gr.Markdown("## Results", visible=True)
-                gr.Markdown("### Metrics", visible=True)
+                gr.Markdown("""## Results<br>
+                ### Available evaluation results""", visible=True)
                 gallery = gr.Gallery(visible=False)
-                table_filtered.select(showmodal, None, [modal, titlemd, authormd, tagsmd, abstractmd, modelsmd, datasetmd, gallery])
+                table_filtered.select(showmodal, None, [modal, titlemd, authormd, tagsmd, abstractmd, considerationsmd, modelsmd, datasetmd, gallery])
 
 
 
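Why the click check moves from 5 to 4: the Bias/Stereolevels table no longer includes the 'Considerations' column, so 'Link' shifts from column index 5 to column index 4 and the guard in showmodal has to follow. Below is a minimal sketch of that index arithmetic, assuming evt.index is (row, column) for a Dataframe click, as the existing check implies; link_column_index is a hypothetical helper, not part of app.py.

# Hypothetical helper (not in app.py) illustrating the column-index change.
OLD_COLUMNS = ['Modality', 'Level', 'Suggested Evaluation',
               'What it is evaluating', 'Considerations', 'Link']
NEW_COLUMNS = ['Modality', 'Level', 'Suggested Evaluation',
               'What it is evaluating', 'Link']

def link_column_index(columns):
    """Position of the 'Link' column, i.e. the value evt.index[1] must equal."""
    return columns.index('Link')

assert link_column_index(OLD_COLUMNS) == 5   # old guard: evt.index[1] == 5
assert link_column_index(NEW_COLUMNS) == 4   # new guard: evt.index[1] == 4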
 
 
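For context, a minimal, self-contained sketch of how the reworked callback and the new considerationsmd output fit together. This is not the app's full code: the table contents are placeholders, and it assumes the Modal component comes from the gradio_modal package, consistent with the Modal(visible=...) calls in the diff. The point it illustrates is the constraint the commit has to respect: the list returned by showmodal must line up, in order, with the outputs list passed to .select().

import gradio as gr
import pandas as pd
from gradio_modal import Modal  # assumed source of the Modal component

# Columns shown after this commit; 'Link' sits at index 4.
COLUMNS = ['Modality', 'Level', 'Suggested Evaluation',
           'What it is evaluating', 'Link']

def showmodal(evt: gr.SelectData):
    # Start with everything hidden; only populated fields become visible.
    modal = Modal(visible=False)
    titlemd = gr.Markdown("", visible=False)
    considerationsmd = gr.Markdown("", visible=False)
    if evt.index[1] == 4:  # the click landed on the 'Link' column
        modal = Modal(visible=True)
        titlemd = gr.Markdown("### " + str(evt.value), visible=True)
        considerationsmd = gr.Markdown(
            "<strong>Considerations: </strong>placeholder text", visible=True)
    # The order of this list must match the outputs wired to .select() below.
    return [modal, titlemd, considerationsmd]

with gr.Blocks() as demo:
    table = gr.Dataframe(
        pd.DataFrame([["Text", "Model", "Example eval", "Example target",
                       "https://example.org"]], columns=COLUMNS),
        interactive=False)
    with Modal(visible=False) as modal:
        titlemd = gr.Markdown(visible=False)
        considerationsmd = gr.Markdown(visible=False)
    table.select(showmodal, None, [modal, titlemd, considerationsmd])

if __name__ == "__main__":
    demo.launch()

A click anywhere outside the 'Link' column returns the hidden placeholders, so the modal simply stays closed.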