lewtun (HF staff) committed
Commit 24b0def
1 Parent(s): 984f58b

Fix QA mapping

Files changed (1): app.py (+11, -13)
app.py CHANGED
@@ -30,7 +30,7 @@ TASK_TO_ID = {
     "summarization": 8,
 }
 
-supported_tasks = list(TASK_TO_ID.keys())
+SUPPORTED_TASKS = list(TASK_TO_ID.keys())
 
 
 ###########
@@ -58,8 +58,8 @@ selected_dataset = st.selectbox("Select a dataset", all_datasets, index=all_data
 st.experimental_set_query_params(**{"dataset": [selected_dataset]})
 
 
-# TODO: In general this will be a list of multiple configs => need to generalise logic here
 metadata = get_metadata(selected_dataset)
+print(metadata)
 if metadata is None:
     st.warning("No evaluation metadata found. Please configure the evaluation job below.")
 
@@ -67,8 +67,8 @@ with st.expander("Advanced configuration"):
     ## Select task
     selected_task = st.selectbox(
         "Select a task",
-        supported_tasks,
-        index=supported_tasks.index(metadata[0]["task_id"]) if metadata is not None else 0,
+        SUPPORTED_TASKS,
+        index=SUPPORTED_TASKS.index(metadata[0]["task_id"]) if metadata is not None else 0,
     )
     ### Select config
     configs = get_dataset_config_names(selected_dataset)
@@ -192,6 +192,8 @@ with st.expander("Advanced configuration"):
         col_mapping[target_col] = "target"
 
     elif selected_task == "extractive_question_answering":
+        col_mapping = metadata[0]["col_mapping"]
+        col_mapping = {k.replace("-", "."): v.replace("-", ".") for k, v in col_mapping.items()}
         with col1:
             st.markdown("`context` column")
             st.text("")
@@ -213,26 +215,22 @@ with st.expander("Advanced configuration"):
         context_col = st.selectbox(
             "This column should contain the question's context",
             col_names,
-            index=col_names.index(get_key(metadata[0]["col_mapping"], "context")) if metadata is not None else 0,
+            index=col_names.index(get_key(col_mapping, "context")) if metadata is not None else 0,
         )
         question_col = st.selectbox(
             "This column should contain the question to be answered, given the context",
             col_names,
-            index=col_names.index(get_key(metadata[0]["col_mapping"], "question")) if metadata is not None else 0,
+            index=col_names.index(get_key(col_mapping, "question")) if metadata is not None else 0,
         )
         answers_text_col = st.selectbox(
             "This column should contain example answers to the question, extracted from the context",
             col_names,
-            index=col_names.index(get_key(metadata[0]["col_mapping"], "answers.text"))
-            if metadata is not None
-            else 0,
+            index=col_names.index(get_key(col_mapping, "answers.text")) if metadata is not None else 0,
         )
         answers_start_col = st.selectbox(
             "This column should contain the indices in the context of the first character of each answers.text",
             col_names,
-            index=col_names.index(get_key(metadata[0]["col_mapping"], "answers.answer_start"))
-            if metadata is not None
-            else 0,
+            index=col_names.index(get_key(col_mapping, "answers.answer_start")) if metadata is not None else 0,
         )
         col_mapping[context_col] = "context"
         col_mapping[question_col] = "question"
@@ -302,7 +300,7 @@ with st.form(key="form"):
             f"""
            Evaluation takes appoximately 1 hour to complete, so grab a β˜• or 🍡 while you wait:
 
-           * πŸ“Š Click [here](https://huggingface.co/spaces/huggingface/leaderboards) to view the results from your submission
+           * πŸ“Š Click [here](https://huggingface.co/spaces/autoevaluate/leaderboards) to view the results from your submission
            """
         )
     else:
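
Note (not part of the commit): the fix works because the stored evaluation metadata appears to use dashes where the dataset columns use dots (answers-text vs. answers.text), so the mapping is normalised before get_key does its reverse lookup. The sketch below is illustrative only; the example mapping and this get_key implementation are assumptions, and the app's real helper may differ.

# Illustrative sketch only: the example mapping and this get_key implementation
# are assumptions, not the commit's actual code.

def get_key(col_mapping: dict, val: str) -> str:
    """Return the dataset column (key) that maps to the canonical name `val`."""
    return next(k for k, v in col_mapping.items() if v == val)

# Column mapping as it appears to be stored in the metadata, with dashes in place of dots.
raw_mapping = {
    "context": "context",
    "question": "question",
    "answers-text": "answers-text",
    "answers-answer_start": "answers-answer_start",
}

# The normalisation added by this commit: swap dashes for dots in keys and values.
col_mapping = {k.replace("-", "."): v.replace("-", ".") for k, v in raw_mapping.items()}

print(get_key(col_mapping, "answers.text"))          # -> answers.text
print(get_key(col_mapping, "answers.answer_start"))  # -> answers.answer_start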