lewtun (HF staff) committed
Commit 1e71c79 • 2 Parent(s): 39fb268 b36527b

Merge pull request #25 from huggingface/add-username

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -27,16 +27,16 @@ AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
 AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
 DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API")
 
-
+# Put image tasks on top
 TASK_TO_ID = {
+    "image_binary_classification": 17,
+    "image_multi_class_classification": 18,
     "binary_classification": 1,
     "multi_class_classification": 2,
     "entity_extraction": 4,
     "extractive_question_answering": 5,
     "translation": 6,
     "summarization": 8,
-    "image_binary_classification": 17,
-    "image_multi_class_classification": 18,
 }
 
 TASK_TO_DEFAULT_METRICS = {
@@ -434,6 +434,8 @@ with st.form(key="form"):
     )
     print("INFO -- Selected models after filter:", selected_models)
 
+    hf_username = st.text_input("Enter your 🤗 Hub username to be notified when the evaluation is finished")
+
     submit_button = st.form_submit_button("Evaluate models 🚀")
 
     if submit_button:
@@ -455,10 +457,7 @@ with st.form(key="form"):
                     "num_instances": 1,
                     "disk_size_gb": 150,
                 },
-                "evaluation": {
-                    "metrics": selected_metrics,
-                    "models": selected_models,
-                },
+                "evaluation": {"metrics": selected_metrics, "models": selected_models, "hf_username": hf_username},
             },
         }
         print(f"INFO -- Payload: {project_payload}")
@@ -496,7 +495,7 @@ with st.form(key="form"):
         ).json()
         print(f"INFO -- AutoTrain job response: {train_json_resp}")
         if train_json_resp["success"]:
-            st.success(f"✅ Successfully submitted evaluation job with project name {project_id}")
+            st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")
             st.markdown(
                 f"""
                 Evaluation can take up to 1 hour to complete, so grab a ☕ or 🍵 while you wait:
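
For orientation, here is a minimal, self-contained sketch of how the pieces touched by this diff fit together, assuming only Streamlit is installed. The placeholder values (selected_metrics, selected_models, project_id) stand in for selections made elsewhere in app.py and are not part of the commit; the real app also posts the payload to the AutoTrain backend, which is omitted here.

# Sketch only, not the committed app.py: a minimal illustration of how the
# hf_username field introduced by this merge flows into the evaluation payload.
# All "placeholder" values stand in for code outside the diff hunks.
import streamlit as st

selected_metrics = ["accuracy"]                # placeholder; chosen earlier in the real form
selected_models = ["distilbert-base-uncased"]  # placeholder; chosen earlier in the real form
project_id = "demo-project"                    # placeholder; generated by the real app

with st.form(key="form"):
    # New in this merge: collect the submitter's Hub username for notifications.
    hf_username = st.text_input(
        "Enter your 🤗 Hub username to be notified when the evaluation is finished"
    )
    submit_button = st.form_submit_button("Evaluate models 🚀")

    if submit_button:
        # The username is bundled into the same "evaluation" dict as the metrics
        # and models, replacing the multi-line block removed by this diff.
        project_payload = {
            "config": {
                "evaluation": {
                    "metrics": selected_metrics,
                    "models": selected_models,
                    "hf_username": hf_username,
                },
            },
        }
        print(f"INFO -- Payload: {project_payload}")
        # In the real app this message follows a successful POST to the AutoTrain
        # backend; here it is shown unconditionally for illustration.
        st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")

Running the sketch with streamlit run reproduces the form field and the payload shape that the merged app.py submits, without contacting any backend.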