Rename
- src/envs.py +2 -2
- src/submission/submit.py +4 -2
src/envs.py
CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import HfApi
 
 # Info to change for your repository
 # ----------------------------------
-
+HF_TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
 
 OWNER = "llm-jp" # Change to your org - don't forget to create a results and request dataset, with the correct format!
 # ----------------------------------
@@ -20,4 +20,4 @@ CACHE_PATH = pathlib.Path(os.getenv("HF_HOME", "."))
 # Local caches
 EVAL_REQUESTS_PATH = CACHE_PATH / "eval-queue"
 
-API = HfApi(token=
+API = HfApi(token=HF_TOKEN)
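Taken together, the envs.py change reads the Hub token once from the environment (in a Space it would typically be supplied as a repository secret named HF_TOKEN) and hands it to a single shared HfApi client. Below is a minimal sketch of the resulting pattern, assuming only what is visible in the diff above; the whoami() check at the bottom is an illustrative addition, not part of the file.

import os

from huggingface_hub import HfApi

# A read/write token for your org; if unset, HfApi falls back to anonymous access.
HF_TOKEN = os.environ.get("HF_TOKEN")

OWNER = "llm-jp"

# One shared, authenticated client reused across the app.
API = HfApi(token=HF_TOKEN)

if __name__ == "__main__":
    # Sanity check: whoami() only succeeds with a valid token.
    print(API.whoami()["name"] if HF_TOKEN else "running anonymously")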
src/submission/submit.py
CHANGED
@@ -3,7 +3,7 @@ from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
 from src.display.utils import EvalQueuedModel, LLMJpEvalVersion, VllmVersion
-from src.envs import API, EVAL_REQUESTS_PATH,
+from src.envs import API, EVAL_REQUESTS_PATH, HF_TOKEN, QUEUE_REPO
 from src.submission.check_validity import already_submitted_models, check_model_card, is_model_on_hub
 
 REQUESTED_MODELS: set[EvalQueuedModel] = set()
@@ -48,7 +48,9 @@ def add_new_eval(
         return styled_error("Please select a model type.")
 
     # Is the model on the hub?
-    model_on_hub, error, _ = is_model_on_hub(
+    model_on_hub, error, _ = is_model_on_hub(
+        model_name=model_id, revision=revision, token=HF_TOKEN, test_tokenizer=True
+    )
     if not model_on_hub:
         return styled_error(f'Model "{model_id}" {error}')
 
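On the submit.py side, HF_TOKEN is now imported from src.envs and forwarded to is_model_on_hub, so the pre-submission check can also resolve gated or private repositories. The diff only shows the call site; the sketch below is one plausible shape for such a check (an assumption for illustration, not the repository's actual src/submission/check_validity.py), matching the keyword arguments used in the new call.

from transformers import AutoConfig, AutoTokenizer


def is_model_on_hub(
    model_name: str,
    revision: str,
    token: str | None = None,
    test_tokenizer: bool = False,
) -> tuple[bool, str, object | None]:
    """Return (found, error_message, config) for a model id on the Hub."""
    try:
        config = AutoConfig.from_pretrained(model_name, revision=revision, token=token)
        if test_tokenizer:
            # Some repos ship a config but a missing or broken tokenizer; check it too.
            AutoTokenizer.from_pretrained(model_name, revision=revision, token=token)
        return True, "", config
    except OSError:
        return False, "was not found or is gated/private (is the token set?)", None
    except Exception as e:  # e.g. malformed config or tokenizer files
        return False, f"could not be loaded: {e}", None

With test_tokenizer=True, a repo whose tokenizer cannot be loaded is rejected before it ever enters the evaluation queue, and the styled_error message reports the reason back to the submitter.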