lewtun HF staff committed on
Commit
0779c9b
β€’
1 Parent(s): 79d85b6

Fix quality

Browse files
Files changed (2) hide show
  1. app.py +35 -15
  2. utils.py +12 -3
app.py CHANGED
@@ -34,9 +34,9 @@ TASK_TO_ID = {
34
  SUPPORTED_TASKS = list(TASK_TO_ID.keys())
35
 
36
 
37
- ###########
38
- ### APP ###
39
- ###########
40
  st.title("Evaluation as a Service")
41
  st.markdown(
42
  """
@@ -65,18 +65,22 @@ if metadata is None:
65
  st.warning("No evaluation metadata found. Please configure the evaluation job below.")
66
 
67
  with st.expander("Advanced configuration"):
68
- ## Select task
69
  selected_task = st.selectbox(
70
  "Select a task",
71
  SUPPORTED_TASKS,
72
  index=SUPPORTED_TASKS.index(metadata[0]["task_id"]) if metadata is not None else 0,
73
  )
74
- ### Select config
75
  configs = get_dataset_config_names(selected_dataset)
76
  selected_config = st.selectbox("Select a config", configs)
77
 
78
- ## Select splits
79
- splits_resp = http_get(path="/splits", domain=DATASETS_PREVIEW_API, params={"dataset": selected_dataset})
 
 
 
 
80
  if splits_resp.status_code == 200:
81
  split_names = []
82
  all_splits = splits_resp.json()
@@ -90,11 +94,15 @@ with st.expander("Advanced configuration"):
90
  index=split_names.index(metadata[0]["splits"]["eval_split"]) if metadata is not None else 0,
91
  )
92
 
93
- ## Select columns
94
  rows_resp = http_get(
95
  path="/rows",
96
  domain=DATASETS_PREVIEW_API,
97
- params={"dataset": selected_dataset, "config": selected_config, "split": selected_split},
 
 
 
 
98
  ).json()
99
  col_names = list(pd.json_normalize(rows_resp["rows"][0]["row"]).columns)
100
 
@@ -136,7 +144,7 @@ with st.expander("Advanced configuration"):
136
  st.markdown("`tags` column")
137
  with col2:
138
  tokens_col = st.selectbox(
139
- "This column should contain the parts of the text (as an array of tokens) you want to assign labels to",
140
  col_names,
141
  index=col_names.index(get_key(metadata[0]["col_mapping"], "tokens")) if metadata is not None else 0,
142
  )
@@ -247,7 +255,11 @@ with st.form(key="form"):
247
  print("Selected models:", selected_models)
248
 
249
  selected_models = filter_evaluated_models(
250
- selected_models, selected_task, selected_dataset, selected_config, selected_split
 
 
 
 
251
  )
252
  print("Selected models:", selected_models)
253
 
@@ -278,7 +290,10 @@ with st.form(key="form"):
278
  }
279
  print(f"Payload: {payload}")
280
  project_json_resp = http_post(
281
- path="/projects/create", payload=payload, token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
 
 
 
282
  ).json()
283
  print(project_json_resp)
284
 
@@ -293,7 +308,11 @@ with st.form(key="form"):
293
  payload=payload,
294
  token=HF_TOKEN,
295
  domain=AUTOTRAIN_BACKEND_API,
296
- params={"type": "dataset", "config_name": selected_config, "split_name": selected_split},
 
 
 
 
297
  ).json()
298
  print(data_json_resp)
299
  if data_json_resp["download_status"] == 1:
@@ -306,10 +325,11 @@ with st.form(key="form"):
306
  if train_json_resp["success"]:
307
  st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")
308
  st.markdown(
309
- f"""
310
  Evaluation takes approximately 1 hour to complete, so grab a ☕ or 🍵 while you wait:
311
 
312
- * 📊 Click [here](https://huggingface.co/spaces/autoevaluate/leaderboards) to view the results from your submission
 
313
  """
314
  )
315
  else:
34
  SUPPORTED_TASKS = list(TASK_TO_ID.keys())
35
 
36
 
37
+ #######
38
+ # APP #
39
+ #######
40
  st.title("Evaluation as a Service")
41
  st.markdown(
42
  """
65
  st.warning("No evaluation metadata found. Please configure the evaluation job below.")
66
 
67
  with st.expander("Advanced configuration"):
68
+ # Select task
69
  selected_task = st.selectbox(
70
  "Select a task",
71
  SUPPORTED_TASKS,
72
  index=SUPPORTED_TASKS.index(metadata[0]["task_id"]) if metadata is not None else 0,
73
  )
74
+ # Select config
75
  configs = get_dataset_config_names(selected_dataset)
76
  selected_config = st.selectbox("Select a config", configs)
77
 
78
+ # Select splits
79
+ splits_resp = http_get(
80
+ path="/splits",
81
+ domain=DATASETS_PREVIEW_API,
82
+ params={"dataset": selected_dataset},
83
+ )
84
  if splits_resp.status_code == 200:
85
  split_names = []
86
  all_splits = splits_resp.json()
94
  index=split_names.index(metadata[0]["splits"]["eval_split"]) if metadata is not None else 0,
95
  )
96
 
97
+ # Select columns
98
  rows_resp = http_get(
99
  path="/rows",
100
  domain=DATASETS_PREVIEW_API,
101
+ params={
102
+ "dataset": selected_dataset,
103
+ "config": selected_config,
104
+ "split": selected_split,
105
+ },
106
  ).json()
107
  col_names = list(pd.json_normalize(rows_resp["rows"][0]["row"]).columns)
108
 
144
  st.markdown("`tags` column")
145
  with col2:
146
  tokens_col = st.selectbox(
147
+ "This column should contain the array of tokens",
148
  col_names,
149
  index=col_names.index(get_key(metadata[0]["col_mapping"], "tokens")) if metadata is not None else 0,
150
  )
255
  print("Selected models:", selected_models)
256
 
257
  selected_models = filter_evaluated_models(
258
+ selected_models,
259
+ selected_task,
260
+ selected_dataset,
261
+ selected_config,
262
+ selected_split,
263
  )
264
  print("Selected models:", selected_models)
265
 
290
  }
291
  print(f"Payload: {payload}")
292
  project_json_resp = http_post(
293
+ path="/projects/create",
294
+ payload=payload,
295
+ token=HF_TOKEN,
296
+ domain=AUTOTRAIN_BACKEND_API,
297
  ).json()
298
  print(project_json_resp)
299
 
308
  payload=payload,
309
  token=HF_TOKEN,
310
  domain=AUTOTRAIN_BACKEND_API,
311
+ params={
312
+ "type": "dataset",
313
+ "config_name": selected_config,
314
+ "split_name": selected_split,
315
+ },
316
  ).json()
317
  print(data_json_resp)
318
  if data_json_resp["download_status"] == 1:
325
  if train_json_resp["success"]:
326
  st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")
327
  st.markdown(
328
+ """
329
  Evaluation takes approximately 1 hour to complete, so grab a ☕ or 🍵 while you wait:
330
 
331
+ * 📊 Click [here](https://huggingface.co/spaces/autoevaluate/leaderboards) to view the \
332
+ results from your submission
333
  """
334
  )
335
  else:
utils.py CHANGED
@@ -27,7 +27,11 @@ def http_post(path: str, token: str, payload=None, domain: str = None, params=No
27
  """HTTP POST request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
28
  try:
29
  response = requests.post(
30
- url=domain + path, json=payload, headers=get_auth_headers(token=token), allow_redirects=True, params=params
 
 
 
 
31
  )
32
  except requests.exceptions.ConnectionError:
33
  print("❌ Failed to reach AutoNLP API, check your internet connection")
@@ -39,7 +43,10 @@ def http_get(path: str, domain: str, token: str = None, params: dict = None) ->
39
  """HTTP GET request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
40
  try:
41
  response = requests.get(
42
- url=domain + path, headers=get_auth_headers(token=token), allow_redirects=True, params=params
 
 
 
43
  )
44
  except requests.exceptions.ConnectionError:
45
  print("❌ Failed to reach AutoNLP API, check your internet connection")
@@ -58,7 +65,9 @@ def get_metadata(dataset_name: str) -> Union[Dict, None]:
58
  def get_compatible_models(task, dataset_name):
59
  # TODO: relax filter on PyTorch models once supported in AutoTrain
60
  filt = ModelFilter(
61
- task=AUTOTRAIN_TASK_TO_HUB_TASK[task], trained_dataset=dataset_name, library=["transformers", "pytorch"]
 
 
62
  )
63
  compatible_models = api.list_models(filter=filt)
64
  return [model.modelId for model in compatible_models]
27
  """HTTP POST request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
28
  try:
29
  response = requests.post(
30
+ url=domain + path,
31
+ json=payload,
32
+ headers=get_auth_headers(token=token),
33
+ allow_redirects=True,
34
+ params=params,
35
  )
36
  except requests.exceptions.ConnectionError:
37
  print("❌ Failed to reach AutoNLP API, check your internet connection")
43
  """HTTP GET request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
44
  try:
45
  response = requests.get(
46
+ url=domain + path,
47
+ headers=get_auth_headers(token=token),
48
+ allow_redirects=True,
49
+ params=params,
50
  )
51
  except requests.exceptions.ConnectionError:
52
  print("❌ Failed to reach AutoNLP API, check your internet connection")
65
  def get_compatible_models(task, dataset_name):
66
  # TODO: relax filter on PyTorch models once supported in AutoTrain
67
  filt = ModelFilter(
68
+ task=AUTOTRAIN_TASK_TO_HUB_TASK[task],
69
+ trained_dataset=dataset_name,
70
+ library=["transformers", "pytorch"],
71
  )
72
  compatible_models = api.list_models(filter=filt)
73
  return [model.modelId for model in compatible_models]