Paul Hager committed
Commit 8b2cb77 · Parent(s): 0e12557

Dict key bug
Files changed:
- src/display/utils.py (+1 -1)
- src/leaderboard/read_evals.py (+5 -5)
src/display/utils.py
CHANGED
@@ -40,7 +40,7 @@ auto_eval_column_dict.append(["seq_length", ColumnContent, ColumnContent("Max Se
 auto_eval_column_dict.append(["model_quantization_bits", ColumnContent, ColumnContent("Quantization Bits", "number", False)])
 # auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
 auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
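For context, AutoEvalColumn is generated at import time from whatever entries remain uncommented in auto_eval_column_dict, so dropping the "revision" append also drops the revision field from the generated class. A minimal sketch of that mechanism, with an assumed simplified ColumnContent and only two illustrative entries:

from dataclasses import dataclass, make_dataclass

# Simplified stand-in for the project's ColumnContent (assumed shape; the real class lives in src/display/utils.py).
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False

auto_eval_column_dict = []
auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
# auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])

# Same pattern as utils.py: fields come only from the entries that are still appended.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.still_on_hub.name)     # "Available on the hub"
print(hasattr(AutoEvalColumn, "revision"))  # False: the commented-out column never becomes a field
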
src/leaderboard/read_evals.py
CHANGED
@@ -121,13 +121,13 @@ class EvalResult:
         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
         data_dict = {
             "eval_name": self.eval_name, # not a column, just a save name,
-            AutoEvalColumn.precision.name: self.precision.value.name,
-            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+            # AutoEvalColumn.precision.name: self.precision.value.name,
+            # AutoEvalColumn.model_type.name: self.model_type.value.name,
+            # AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+            # AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-            AutoEvalColumn.revision.name: self.revision,
+            # AutoEvalColumn.revision.name: self.revision,
             AutoEvalColumn.average.name: average,
             # AutoEvalColumn.license.name: self.license,
             # AutoEvalColumn.likes.name: self.likes,
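This appears to be the "Dict key bug" named in the commit title: data_dict was still keyed on AutoEvalColumn fields (precision, model_type, model_type_symbol, weight_type, revision) whose columns are presumably no longer registered in auto_eval_column_dict, and looking up a missing field on the dynamically built class raises AttributeError before the dict can be returned. Commenting those keys out keeps the row aligned with the columns that actually exist. A small, self-contained sketch of the failure mode, using illustrative column names and values rather than the project's real ones:

from dataclasses import dataclass, make_dataclass

# Illustrative stand-ins; the real definitions live in src/display/utils.py.
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool

# Only columns that are still appended in utils.py end up as fields.
AutoEvalColumn = make_dataclass(
    "AutoEvalColumn",
    [["architecture", ColumnContent, ColumnContent("Architecture", "str", False)]],
    frozen=True,
)

data_dict = {"eval_name": "example_eval"}            # illustrative save name
data_dict[AutoEvalColumn.architecture.name] = "?"    # fine: this column is still registered
try:
    data_dict[AutoEvalColumn.revision.name] = "main"  # pre-fix code path: the field no longer exists
except AttributeError as err:
    print(err)  # type object 'AutoEvalColumn' has no attribute 'revision'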