Commit f417916 (parent: a1bf704) by lewtun (HF staff)

Add redirect to leaderboard URL

Files changed (1): app.py (+5 −5)
app.py CHANGED
@@ -26,7 +26,7 @@ TASK_TO_ID = {
     # "multi_label_classification": 3, # Not fully supported in AutoTrain
     "entity_extraction": 4,
     "extractive_question_answering": 5,
-    "translation": 6,
+    # "translation": 6, # Not fully supported in AutoTrain evaluation
     "summarization": 8,
 }
 
@@ -40,7 +40,7 @@ st.title("Evaluation as a Service")
 st.markdown(
     """
     Welcome to Hugging Face's Evaluation as a Service! This application allows
-    you to evaluate any 🤗 Transformers model with a dataset on the Hub. Please
+    you to evaluate 🤗 Transformers models with a dataset on the Hub. Please
     select the dataset and configuration below. The results of your evaluation
     will be displayed on the public leaderboard
     [here](https://huggingface.co/spaces/autoevaluate/leaderboards).
@@ -324,11 +324,11 @@ with st.form(key="form"):
         if train_json_resp["success"]:
             st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")
             st.markdown(
-                """
+                f"""
                 Evaluation takes approximately 1 hour to complete, so grab a ☕ or 🍵 while you wait:
 
-                * 📊 Click [here](https://huggingface.co/spaces/autoevaluate/leaderboards) to view the \
-                results from your submission
+                📊 Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) \
+                to view the results from your submission
                 """
             )
         else:
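
The redirect now forwards the submitted dataset as a `dataset` query parameter on the leaderboard URL. Below is a minimal sketch of how the receiving Space could pick that parameter up and pre-select the dataset, assuming the leaderboard is also a Streamlit app and uses `st.experimental_get_query_params()` (Streamlit's query-string API at the time). Only the `dataset` parameter name comes from the diff above; the dataset list and widget are illustrative, not the leaderboard's actual code.

```python
# Sketch of the receiving end of the redirect added in this commit.
# Assumes a Streamlit leaderboard app; only the "dataset" query parameter
# name is taken from the diff above, the rest is illustrative.
import streamlit as st

# Returns a dict of lists, e.g. {"dataset": ["emotion"]} for ?dataset=emotion
params = st.experimental_get_query_params()
requested_dataset = params.get("dataset", [None])[0]

# Hypothetical set of datasets with published results; the real Space would
# load these from its results repository.
datasets_with_results = ["emotion", "squad", "xsum"]

default_index = (
    datasets_with_results.index(requested_dataset)
    if requested_dataset in datasets_with_results
    else 0
)
selected_dataset = st.selectbox("Dataset", datasets_with_results, index=default_index)
st.write(f"Showing evaluation results for `{selected_dataset}`")
```

Under these assumptions, following a link such as `https://hf.co/spaces/autoevaluate/leaderboards?dataset=emotion` would open the leaderboard with `emotion` already selected, rather than landing the user on an unfiltered view.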