t0-0 committed
Commit · 567d2b9
1 Parent(s): 72d6a63

Change displayed information in queue and leaderboard
Files changed:
- src/display/utils.py           +3 -4
- src/leaderboard/read_evals.py  +2 -4
src/display/utils.py CHANGED

@@ -40,8 +40,7 @@ auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precisi
 auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
 auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
 auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Revision", "str", False, False)])
 auto_eval_column_dict.append(["num_few_shots", ColumnContent, ColumnContent("Few-shot", "str", False)])
 auto_eval_column_dict.append(["add_special_tokens", ColumnContent, ColumnContent("Add Special Tokens", "bool", False)])
 auto_eval_column_dict.append(
@@ -59,9 +58,9 @@ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=
 class EvalQueueColumn:  # Queue column
     model = ColumnContent("model", "markdown", True)
     revision = ColumnContent("revision", "str", True)
-
+    model_type = ColumnContent("model_type", "str", True)
     precision = ColumnContent("precision", "str", True)
-
+    add_special_tokens = ColumnContent("add_special_tokens", "str", True)
     status = ColumnContent("status", "str", True)
 
 
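For context on why these edits change what is displayed: AutoEvalColumn is generated from auto_eval_column_dict via make_dataclass (visible in the second hunk header), so appending or removing an entry directly adds or drops a leaderboard column. Below is a minimal sketch of that pattern, assuming a ColumnContent dataclass with fields along the lines of name/type/displayed_by_default/hidden; the real definition lives earlier in src/display/utils.py and is not part of this diff.

# Minimal sketch (not the repository's exact code): how auto_eval_column_dict
# entries become attributes of a generated AutoEvalColumn class. The
# ColumnContent field names below are assumptions for illustration.
from dataclasses import dataclass, make_dataclass


@dataclass(frozen=True)
class ColumnContent:
    name: str                     # label shown in the table header
    type: str                     # column type used by the display layer
    displayed_by_default: bool = True
    hidden: bool = False


auto_eval_column_dict = []
# Each entry is [attribute_name, annotation, default ColumnContent instance].
auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Revision", "str", False, False)])
auto_eval_column_dict.append(["num_few_shots", ColumnContent, ColumnContent("Few-shot", "str", False)])

# make_dataclass turns the list into a class whose class attributes are the
# ColumnContent defaults, so AutoEvalColumn.revision.name == "Revision".
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.revision.name)        # -> Revision
print(AutoEvalColumn.num_few_shots.name)   # -> Few-shot

Seen through this pattern, the first hunk changes the displayed label of the revision column from "Model sha" to "Revision", and the second gives EvalQueueColumn model_type and add_special_tokens entries so the evaluation queue table shows them.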
src/leaderboard/read_evals.py CHANGED

@@ -31,7 +31,6 @@ class EvalResult:
     likes: int = 0
     num_params: int = 0
     date: str = ""  # submission date of request file
-    still_on_hub: bool = False
     num_few_shots: str = "0"
     add_special_tokens: str = ""
     llm_jp_eval_version: str = ""
@@ -66,6 +65,7 @@ class EvalResult:
 
         version = Version.from_str(metainfo.get("version", "?")).value.name
         backend = Backend.from_str(model_config.get("_target_", "?").split(".")[0]).value.name
+        revision = model_config.get("revision", "")
 
         # Get model and org
         # org_and_model = config.get("model_name", config.get("offline_inference").get("model_name", None))
@@ -116,8 +116,7 @@ class EvalResult:
             model=model,
             results=results,
             precision=precision,
-            revision=
-            still_on_hub=still_on_hub,
+            revision=revision,
             architecture=architecture,
             num_few_shots=num_few_shots,
             add_special_tokens=add_special_tokens,
@@ -157,7 +156,6 @@ class EvalResult:
             AutoEvalColumn.license.name: self.license,
             AutoEvalColumn.likes.name: self.likes,
             AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
             AutoEvalColumn.num_few_shots.name: self.num_few_shots,
             AutoEvalColumn.add_special_tokens.name: self.add_special_tokens,
             AutoEvalColumn.llm_jp_eval_version.name: self.llm_jp_eval_version,
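The read_evals.py side of the commit drops the still_on_hub flag and instead reads the evaluated revision out of the request's model_config with an empty-string fallback, then passes it through to the EvalResult that backs each leaderboard row. The following is a hedged sketch of that flow; the class and result-file layout below are illustrative stand-ins, since only the .get("revision", "") pattern and the field names appear in the diff.

# Hedged sketch of the revision flow introduced above. EvalResultSketch and the
# JSON layout are assumptions for illustration, not the repository's classes.
import json
from dataclasses import dataclass


@dataclass
class EvalResultSketch:
    model: str
    revision: str = ""            # replaces the removed still_on_hub flag
    num_few_shots: str = "0"
    add_special_tokens: str = ""

    def to_display_row(self) -> dict:
        # Mirrors to_dict: the revision string is what the leaderboard renders.
        return {
            "Model": self.model,
            "Revision": self.revision,
            "Few-shot": self.num_few_shots,
            "Add Special Tokens": self.add_special_tokens,
        }


def load_result(path: str) -> EvalResultSketch:
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    model_config = data.get("model", {})              # hypothetical key name
    return EvalResultSketch(
        model=model_config.get("pretrained_model_name_or_path", "unknown"),
        revision=model_config.get("revision", ""),    # same fallback as the diff
        num_few_shots=str(data.get("num_few_shots", "0")),
        add_special_tokens=str(model_config.get("add_special_tokens", "")),
    )

Because .get is used with an empty-string default, older result files that lack a revision key still produce a row rather than raising a KeyError.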