Spaces:
Sleeping
Sleeping
Merge and debugging submission error
Browse files
- app.py +5 -5
- src/envs.py +1 -1
- src/submission/check_validity.py +5 -0
app.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
|
|
3 |
import pandas as pd
|
4 |
from apscheduler.schedulers.background import BackgroundScheduler
|
5 |
from huggingface_hub import snapshot_download
|
|
|
6 |
|
7 |
from src.about import (
|
8 |
CITATION_BUTTON_LABEL,
|
@@ -26,7 +27,7 @@ from src.display.utils import (
|
|
26 |
WeightType,
|
27 |
Precision
|
28 |
)
|
29 |
-
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO
|
30 |
from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
31 |
from src.submission.submit import add_new_eval
|
32 |
|
@@ -40,18 +41,17 @@ def launch_backend():
|
|
40 |
_ = subprocess.run(["python", "main_backend.py"])
|
41 |
|
42 |
try:
|
43 |
-
print(EVAL_REQUESTS_PATH)
|
44 |
snapshot_download(
|
45 |
-
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
|
46 |
)
|
47 |
except Exception:
|
48 |
print("Could not download snapshot. Restarting.")
|
49 |
restart_space()
|
50 |
try:
|
51 |
-
print(EVAL_RESULTS_PATH)
|
52 |
snapshot_download(
|
53 |
-
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
|
54 |
)
|
|
|
55 |
except Exception:
|
56 |
print("Could not download snapshot. Restarting.")
|
57 |
restart_space()
|
|
|
3 |
import pandas as pd
|
4 |
from apscheduler.schedulers.background import BackgroundScheduler
|
5 |
from huggingface_hub import snapshot_download
|
6 |
+
from os import listdir
|
7 |
|
8 |
from src.about import (
|
9 |
CITATION_BUTTON_LABEL,
|
|
|
27 |
WeightType,
|
28 |
Precision
|
29 |
)
|
30 |
+
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
|
31 |
from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
32 |
from src.submission.submit import add_new_eval
|
33 |
|
|
|
41 |
_ = subprocess.run(["python", "main_backend.py"])
|
42 |
|
43 |
try:
|
|
|
44 |
snapshot_download(
|
45 |
+
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
|
46 |
)
|
47 |
except Exception:
|
48 |
print("Could not download snapshot. Restarting.")
|
49 |
restart_space()
|
50 |
try:
|
|
|
51 |
snapshot_download(
|
52 |
+
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
|
53 |
)
|
54 |
+
print(listdir(EVAL_RESULTS_PATH))
|
55 |
except Exception:
|
56 |
print("Could not download snapshot. Restarting.")
|
57 |
restart_space()
|
src/envs.py
CHANGED
@@ -4,7 +4,7 @@ from huggingface_hub import HfApi
|
|
4 |
|
5 |
# Info to change for your repository
|
6 |
# ----------------------------------
|
7 |
-
TOKEN = os.environ.get("TOKEN") # A read/write token for your org
|
8 |
|
9 |
OWNER = "Bias-Leaderboard" # Change to your org - don't forget to create a results and request file
|
10 |
DEVICE = "cpu" # cuda:0 if you add compute
|
|
|
4 |
|
5 |
# Info to change for your repository
|
6 |
# ----------------------------------
|
7 |
+
TOKEN = os.environ.get("TOKEN") # A read/write user token for a user that has access to your org
|
8 |
|
9 |
OWNER = "Bias-Leaderboard" # Change to your org - don't forget to create a results and request file
|
10 |
DEVICE = "cpu" # cuda:0 if you add compute
|
src/submission/check_validity.py
CHANGED
@@ -36,6 +36,11 @@ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_rem
|
|
36 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
37 |
if test_tokenizer:
|
38 |
try:
|
|
|
|
|
|
|
|
|
|
|
39 |
tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
40 |
except ValueError as e:
|
41 |
return (
|
|
|
36 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
37 |
if test_tokenizer:
|
38 |
try:
|
39 |
+
print("TOKEN: %s" % token)
|
40 |
+
print("model name: %s" % model_name)
|
41 |
+
print("revision: %s" % revision)
|
42 |
+
print("trust remote code: %s" % trust_remote_code)
|
43 |
+
|
44 |
tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
45 |
except ValueError as e:
|
46 |
return (
|