lewtun (HF staff) committed
Commit aff31fa
2 Parent(s): 6348497 b77ab00

Merge pull request #49 from huggingface/project-approval

.env.example DELETED
@@ -1,4 +0,0 @@
-AUTOTRAIN_USERNAME=autoevaluator # The bot that authors evaluation jobs
-HF_TOKEN=hf_xxx # An API token of the `autoevaluator` user
-AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co # The AutoTrain backend to send jobs to. Use https://api.autotrain.huggingface.co for prod
-DATASETS_PREVIEW_API=https://datasets-server.huggingface.co # The API to grab dataset information from
.env.template ADDED
@@ -0,0 +1,4 @@
+AUTOTRAIN_USERNAME=autoevaluator # The bot or user that authors evaluation jobs
+HF_TOKEN=hf_xxx # An API token of the `autoevaluator` user
+AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co # The AutoTrain backend to send jobs to. Use https://api.autotrain.huggingface.co for prod or http://localhost:8000 for local development
+DATASETS_PREVIEW_API=https://datasets-server.huggingface.co # The API to grab dataset information from
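
These variables are read by both `app.py` and `run_evaluation_jobs.py`. As a minimal sketch (not part of this commit), this is how they can be loaded with `python-dotenv` once the template has been copied to `.env`, mirroring the pattern used in `run_evaluation_jobs.py` further down:

```python
import os
from pathlib import Path

from dotenv import load_dotenv

# Load variables from .env if it exists; otherwise os.getenv falls back to
# whatever is already set in the process environment (e.g. in CI).
if Path(".env").is_file():
    load_dotenv(".env")

AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")  # e.g. autoevaluator
HF_TOKEN = os.getenv("HF_TOKEN")  # API token of that user
AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API")
```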
.github/workflows/run_evaluation_jobs.yml ADDED
@@ -0,0 +1,28 @@
+name: Start evaluation jobs
+
+on:
+  schedule:
+    - cron: '*/15 * * * *' # Start evaluations every 15th minute
+
+jobs:
+
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Setup Python Environment
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+
+      - name: Install requirements
+        run: pip install -r requirements.txt
+
+      - name: Execute scoring script
+        env:
+          HF_TOKEN: ${{ secrets.HF_GEM_TOKEN }}
+        run: |
+          HF_TOKEN=$HF_TOKEN AUTOTRAIN_USERNAME=$AUTOTRAIN_USERNAME AUTOTRAIN_BACKEND_API=$AUTOTRAIN_BACKEND_API python run_evaluation_jobs.py
README.md CHANGED
@@ -39,7 +39,7 @@ pip install -r requirements.txt
 Next, copy the example file of environment variables:
 
 ```
-cp .env.examples .env
+cp .env.template .env
 ```
 
 and set the `HF_TOKEN` variable with a valid API token from the `autoevaluator` user. Finally, spin up the application by running:
@@ -53,5 +53,11 @@ streamlit run app.py
 Models are evaluated by AutoTrain, with the payload sent to the `AUTOTRAIN_BACKEND_API` environment variable. The current configuration for evaluation jobs running on Spaces is:
 
 ```
-AUTOTRAIN_BACKEND_API=https://api.autotrain.huggingface.co
+AUTOTRAIN_BACKEND_API=https://api-staging.autotrain.huggingface.co
+```
+
+To evaluate models with a _local_ instance of AutoTrain, change the environment to:
+
+```
+AUTOTRAIN_BACKEND_API=http://localhost:8000
 ```
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import time
 from pathlib import Path
 
 import pandas as pd
@@ -510,50 +511,77 @@ with st.form(key="form"):
                 ).json()
                 print(f"INFO -- Dataset creation response: {data_json_resp}")
                 if data_json_resp["download_status"] == 1:
-                    train_json_resp = http_get(
-                        path=f"/projects/{project_json_resp['id']}/data/start_process",
+                    train_json_resp = http_post(
+                        path=f"/projects/{project_json_resp['id']}/data/start_processing",
                         token=HF_TOKEN,
                         domain=AUTOTRAIN_BACKEND_API,
                     ).json()
-                    print(f"INFO -- AutoTrain job response: {train_json_resp}")
-                    if train_json_resp["success"]:
-                        train_eval_index = {
-                            "train-eval-index": [
-                                {
-                                    "config": selected_config,
-                                    "task": AUTOTRAIN_TASK_TO_HUB_TASK[selected_task],
-                                    "task_id": selected_task,
-                                    "splits": {"eval_split": selected_split},
-                                    "col_mapping": col_mapping,
-                                }
-                            ]
-                        }
-                        selected_metadata = yaml.dump(train_eval_index, sort_keys=False)
-                        dataset_card_url = get_dataset_card_url(selected_dataset)
-                        st.success("βœ… Successfully submitted evaluation job!")
-                        st.markdown(
-                            f"""
-                            Evaluation can take up to 1 hour to complete, so grab a β˜•οΈ or 🍡 while you wait:
-
-                            * πŸ”” A [Hub pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) with the evaluation results will be opened for each model you selected. Check your email for notifications.
-                            * πŸ“Š Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) to view the results from your submission once the Hub pull request is merged.
-                            * πŸ₯± Tired of configuring evaluations? Add the following metadata to the [dataset card]({dataset_card_url}) to enable 1-click evaluations:
-                            """ # noqa
-                        )
-                        st.markdown(
-                            f"""
-                            ```yaml
-                            {selected_metadata}
-                            """
-                        )
-                        print("INFO -- Pushing evaluation job logs to the Hub")
-                        evaluation_log = {}
-                        evaluation_log["payload"] = project_payload
-                        evaluation_log["project_creation_response"] = project_json_resp
-                        evaluation_log["dataset_creation_response"] = data_json_resp
-                        evaluation_log["autotrain_job_response"] = train_json_resp
-                        commit_evaluation_log(evaluation_log, hf_access_token=HF_TOKEN)
+                    # For local development we process and approve projects on-the-fly
+                    if "localhost" in AUTOTRAIN_BACKEND_API:
+                        with st.spinner("⏳ Waiting for data processing to complete ..."):
+                            is_data_processing_success = False
+                            while is_data_processing_success is not True:
+                                project_status = http_get(
+                                    path=f"/projects/{project_json_resp['id']}",
+                                    token=HF_TOKEN,
+                                    domain=AUTOTRAIN_BACKEND_API,
+                                ).json()
+                                if project_status["status"] == 3:
+                                    is_data_processing_success = True
+                                time.sleep(10)
+
+                        # Approve training job
+                        train_job_resp = http_post(
+                            path=f"/projects/{project_json_resp['id']}/start_training",
+                            token=HF_TOKEN,
+                            domain=AUTOTRAIN_BACKEND_API,
+                        ).json()
+                        st.success("βœ… Data processing and project approval complete - go forth and evaluate!")
                     else:
-                        st.error("πŸ™ˆ Oh no, there was an error submitting your evaluation job!")
+                        # Prod/staging submissions are evaluated in a cron job via run_evaluation_jobs.py
+                        print(f"INFO -- AutoTrain job response: {train_json_resp}")
+                        if train_json_resp["success"]:
+                            train_eval_index = {
+                                "train-eval-index": [
+                                    {
+                                        "config": selected_config,
+                                        "task": AUTOTRAIN_TASK_TO_HUB_TASK[selected_task],
+                                        "task_id": selected_task,
+                                        "splits": {"eval_split": selected_split},
+                                        "col_mapping": col_mapping,
+                                    }
+                                ]
+                            }
+                            selected_metadata = yaml.dump(train_eval_index, sort_keys=False)
+                            dataset_card_url = get_dataset_card_url(selected_dataset)
+                            st.success("βœ… Successfully submitted evaluation job!")
+                            st.markdown(
+                                f"""
+                                Evaluation can take up to 1 hour to complete, so grab a β˜•οΈ or 🍡 while you wait:
+
+                                * πŸ”” A [Hub pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) with the evaluation results will be opened for each model you selected. Check your email for notifications.
+                                * πŸ“Š Click [here](https://hf.co/spaces/autoevaluate/leaderboards?dataset={selected_dataset}) to view the results from your submission once the Hub pull request is merged.
+                                * πŸ₯± Tired of configuring evaluations? Add the following metadata to the [dataset card]({dataset_card_url}) to enable 1-click evaluations:
+                                """ # noqa
+                            )
+                            st.markdown(
+                                f"""
+                                ```yaml
+                                {selected_metadata}
+                                """
+                            )
+                            print("INFO -- Pushing evaluation job logs to the Hub")
+                            evaluation_log = {}
+                            evaluation_log["project_id"] = project_json_resp["id"]
+                            evaluation_log["autotrain_env"] = (
+                                "staging" if "staging" in AUTOTRAIN_BACKEND_API else "prod"
+                            )
+                            evaluation_log["payload"] = project_payload
+                            evaluation_log["project_creation_response"] = project_json_resp
+                            evaluation_log["dataset_creation_response"] = data_json_resp
+                            evaluation_log["autotrain_job_response"] = train_json_resp
+                            commit_evaluation_log(evaluation_log, hf_access_token=HF_TOKEN)
+                        else:
+                            st.error("πŸ™ˆ Oh no, there was an error submitting your evaluation job!")
         else:
             st.warning("⚠️ No models left to evaluate! Please select other models and try again.")
requirements.txt CHANGED
@@ -4,6 +4,7 @@ streamlit==1.10.0
 datasets<2.3
 evaluate<0.2
 jsonlines
+typer[rich]
 # Dataset specific deps
 py7zr<0.19
 openpyxl<3.1
run_evaluation_jobs.py ADDED
@@ -0,0 +1,64 @@
+import os
+from pathlib import Path
+
+import typer
+from datasets import load_dataset
+from dotenv import load_dotenv
+from rich import print
+
+from utils import http_get, http_post
+
+if Path(".env").is_file():
+    load_dotenv(".env")
+
+HF_TOKEN = os.getenv("HF_TOKEN")
+AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
+AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
+
+if "staging" in AUTOTRAIN_BACKEND_API:
+    AUTOTRAIN_ENV = "staging"
+else:
+    AUTOTRAIN_ENV = "prod"
+
+
+def main():
+    logs_df = load_dataset("autoevaluate/evaluation-job-logs", use_auth_token=True, split="train").to_pandas()
+    # Filter out legacy AutoTrain submissions prior to project approvals requirement
+    projects_df = logs_df.copy()[(~logs_df["project_id"].isnull())]
+    # Filter IDs for appropriate AutoTrain env (staging vs prod)
+    projects_df = projects_df.copy().query(f"autotrain_env == '{AUTOTRAIN_ENV}'")
+    projects_to_approve = projects_df["project_id"].astype(int).tolist()
+    failed_approvals = []
+    print(f"πŸš€ Found {len(projects_to_approve)} evaluation projects to approve!")
+
+    for project_id in projects_to_approve:
+        print(f"Attempting to evaluate project ID {project_id} ...")
+        try:
+            project_info = http_get(
+                path=f"/projects/{project_id}",
+                token=HF_TOKEN,
+                domain=AUTOTRAIN_BACKEND_API,
+            ).json()
+            print(project_info)
+            # Only start evaluation for projects with completed data processing (status=3)
+            if project_info["status"] == 3 and project_info["training_status"] == "not_started":
+                train_job_resp = http_post(
+                    path=f"/projects/{project_id}/start_training",
+                    token=HF_TOKEN,
+                    domain=AUTOTRAIN_BACKEND_API,
+                ).json()
+                print(f"πŸ€– Project {project_id} approval response: {train_job_resp}")
+            else:
+                print(f"πŸ’ͺ Project {project_id} either not ready or has already been evaluated. Skipping ...")
+        except Exception as e:
+            print(f"There was a problem obtaining the project info for project ID {project_id}")
+            print(f"Error message: {e}")
+            failed_approvals.append(project_id)
+            pass
+
+    if len(failed_approvals) > 0:
+        print(f"🚨 Failed to approve {len(failed_approvals)} projects: {failed_approvals}")
+
+
+if __name__ == "__main__":
+    typer.run(main)
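
With the same variables exported (locally via `.env`, or by the workflow above), the approval filter can be previewed without starting any jobs. A small sketch, assuming the token grants read access to the `autoevaluate/evaluation-job-logs` dataset:

```python
import os

from datasets import load_dataset

# Reproduce the filtering in run_evaluation_jobs.py to see which project IDs
# would be approved on the next cron run, without calling the AutoTrain API.
autotrain_env = "staging" if "staging" in os.getenv("AUTOTRAIN_BACKEND_API", "") else "prod"
logs_df = load_dataset("autoevaluate/evaluation-job-logs", use_auth_token=True, split="train").to_pandas()
pending = logs_df[~logs_df["project_id"].isnull()].query(f"autotrain_env == '{autotrain_env}'")
print("Projects pending approval:", pending["project_id"].astype(int).tolist())
```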