rename-official-providers #1029
opened by alozowski (HF staff)
- app.py +1 -1
- src/display/utils.py +4 -4
- src/envs.py +1 -1
- src/leaderboard/filter_models.py +2 -2
- src/submission/check_validity.py +3 -3
app.py CHANGED
@@ -215,7 +215,7 @@ def init_leaderboard(dataframe):
             ),
             ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
             ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
-            ColumnFilter(AutoEvalColumn.
+            ColumnFilter(AutoEvalColumn.official_providers.name, type="boolean", label="Show only official providers", default=False),
         ],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
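
For context on where this hunk lands: init_leaderboard builds the gradio_leaderboard component, and the boolean ColumnFilters are passed in through its filter list. The sketch below is a trimmed, hypothetical reconstruction, not lines from this diff: the filter_columns keyword, the imports, and the omission of the other constructor arguments are assumptions about how the Space is laid out. It only illustrates how the new "Show only official providers" checkbox sits next to the existing MoE and Flagged toggles.

from gradio_leaderboard import ColumnFilter, Leaderboard

from src.display.utils import AutoEvalColumn  # Space-local module, as used in app.py

def init_leaderboard(dataframe):
    # Only the arguments relevant to this PR are shown; the real call also configures
    # column selection, search columns, datatypes, etc.
    return Leaderboard(
        value=dataframe,
        filter_columns=[
            ColumnFilter(AutoEvalColumn.moe.name, type="boolean", label="MoE", default=False),
            ColumnFilter(AutoEvalColumn.not_flagged.name, type="boolean", label="Flagged", default=True),
            # Added in this PR: a checkbox that narrows the table to curated providers
            ColumnFilter(
                AutoEvalColumn.official_providers.name,
                type="boolean",
                label="Show only official providers",
                default=False,
            ),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )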
src/display/utils.py CHANGED
@@ -6,13 +6,13 @@ import logging
 from datetime import datetime
 import pandas as pd
 
-from src.envs import
+from src.envs import OFFICIAL_PROVIDERS_REPO
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 
-dataset = load_dataset(
-
+dataset = load_dataset(OFFICIAL_PROVIDERS_REPO)
+official_providers = dataset["train"][0]["CURATED_SET"]
 
 # Convert ISO 8601 dates to datetime objects for comparison
 def parse_iso8601_datetime(date_str):
@@ -120,7 +120,7 @@ auto_eval_column_dict.append(["submission_date", ColumnContent, ColumnContent("S
 auto_eval_column_dict.append(["upload_to_hub", ColumnContent, ColumnContent("Upload To Hub Date", "bool", False, hidden=False)])
 
 auto_eval_column_dict.append(["use_chat_template", ColumnContent, ColumnContent("Chat Template", "bool", False)])
-auto_eval_column_dict.append(["
+auto_eval_column_dict.append(["official_providers", ColumnContent, ColumnContent("Official Providers", "bool", False, hidden=True)])
 
 # fullname structure: <user>/<model_name>
 auto_eval_column_dict.append(["fullname", ColumnContent, ColumnContent("fullname", "str", False, dummy=True)])
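
The notable part of this file is that the curated provider list now comes straight from the Hub: load_dataset pulls the OFFICIAL_PROVIDERS_REPO dataset at import time, and the first row of its train split is expected to carry the list under the CURATED_SET key. A minimal standalone sketch of that loading step, assuming the datasets library and a public dataset with exactly the layout the diff implies:

from datasets import load_dataset

OFFICIAL_PROVIDERS_REPO = "open-llm-leaderboard/official-providers"

# The diff reads the curated list from the first row of the train split
dataset = load_dataset(OFFICIAL_PROVIDERS_REPO)
official_providers = dataset["train"][0]["CURATED_SET"]
print(f"Loaded {len(official_providers)} official providers")

Since the call happens at module level, a missing split or a renamed CURATED_SET field would surface as an error the moment src.display.utils is imported.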
src/envs.py CHANGED
@@ -8,7 +8,7 @@ REPO_ID = "open-llm-leaderboard/open_llm_leaderboard"
 QUEUE_REPO = "open-llm-leaderboard/requests"
 AGGREGATED_REPO = "open-llm-leaderboard/contents"
 VOTES_REPO = "open-llm-leaderboard/votes"
-
+OFFICIAL_PROVIDERS_REPO = "open-llm-leaderboard/official-providers"
 
 HF_HOME = os.getenv("HF_HOME", ".")
 
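
The new constant is just the Hub ID of the dataset consumed in src/display/utils.py. As a purely illustrative aside (assuming the repo is public and the huggingface_hub client is available), repo_exists can confirm the dataset is reachable before the leaderboard tries to load it:

from huggingface_hub import HfApi

OFFICIAL_PROVIDERS_REPO = "open-llm-leaderboard/official-providers"

# Returns True when the dataset repo resolves (public, or private with a valid token)
if HfApi().repo_exists(OFFICIAL_PROVIDERS_REPO, repo_type="dataset"):
    print(f"{OFFICIAL_PROVIDERS_REPO} is reachable")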
src/leaderboard/filter_models.py CHANGED
@@ -19,8 +19,8 @@ DO_NOT_SUBMIT_MODELS = [
 def flag_models(leaderboard_data: list[dict]):
     """Flags models based on external criteria or flagged status."""
     for model_data in leaderboard_data:
-        # Skip flagging if
-        if model_data.get(AutoEvalColumn.
+        # Skip flagging if official providers is True
+        if model_data.get(AutoEvalColumn.official_providers.name, False):
             model_data[AutoEvalColumn.not_flagged.name] = True
             continue
 
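
The effect of the new branch: any row whose official-providers column is truthy is marked not flagged and skips the remaining flagging checks. A self-contained sketch with plain dict keys standing in for the AutoEvalColumn.*.name lookups; the model names and the trailing check are made up for illustration only:

def flag_models(leaderboard_data):
    """Sketch of the updated behaviour: official-provider rows are never flagged."""
    for model_data in leaderboard_data:
        # New in this PR: skip flagging entirely for official providers
        if model_data.get("official_providers", False):
            model_data["not_flagged"] = True
            continue
        # Stand-in for the existing checks against external flagged-model lists
        model_data["not_flagged"] = model_data["fullname"] not in {"some-user/suspicious-merge"}

rows = [
    {"fullname": "official-org/model-a", "official_providers": True},
    {"fullname": "some-user/suspicious-merge", "official_providers": False},
]
flag_models(rows)
print([(r["fullname"], r["not_flagged"]) for r in rows])
# [('official-org/model-a', True), ('some-user/suspicious-merge', False)]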
src/submission/check_validity.py CHANGED
@@ -10,7 +10,7 @@ from huggingface_hub import ModelCard, hf_hub_download
 from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata, parse_safetensors_file_metadata
 from transformers import AutoConfig, AutoTokenizer
 
-from src.display.utils import parse_iso8601_datetime,
+from src.display.utils import parse_iso8601_datetime, official_providers
 from src.envs import HAS_HIGHER_RATE_LIMIT
 
 
@@ -127,8 +127,8 @@ def get_model_arch(model_info: ModelInfo):
 
 
 def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
-    # No limit for
-    if org_or_user in
+    # No limit for official providers
+    if org_or_user in official_providers:
         return True, ""
     # Increase quota first if user has higher limits
    if org_or_user in HAS_HIGHER_RATE_LIMIT:
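
Both hunks together give official providers a submission fast path: the curated list imported from src/display/utils.py short-circuits the rate-limit check before any quota logic runs. A self-contained sketch; the stand-in lists, the quota doubling, and the rolling-window arithmetic are illustrative assumptions, and only the two new lines mirror the diff:

from datetime import datetime, timedelta, timezone

# Hypothetical stand-ins for the values imported in check_validity.py
official_providers = ["official-org-a", "official-org-b"]   # from src.display.utils
HAS_HIGHER_RATE_LIMIT = ["trusted-org"]                     # from src.envs

def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
    # New in this PR: official providers bypass rate limiting entirely
    if org_or_user in official_providers:
        return True, ""
    # Sketch of the pre-existing path: allow-listed orgs get a larger quota, then
    # submissions inside the rolling window are counted against it
    if org_or_user in HAS_HIGHER_RATE_LIMIT:
        rate_limit_quota *= 2
    window_start = datetime.now(timezone.utc) - timedelta(days=rate_limit_period)
    recent = [d for d in users_to_submission_dates.get(org_or_user, []) if d > window_start]
    if len(recent) >= rate_limit_quota:
        return False, f"rate limit of {rate_limit_quota} submissions per {rate_limit_period} days reached"
    return True, ""

# Usage: an official provider is always allowed, regardless of submission history
print(user_submission_permission("official-org-a", {}, rate_limit_period=7, rate_limit_quota=5))
# (True, '')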