from dataclasses import dataclass, make_dataclass


def fields(raw_class):
    # Collect the non-dunder class attributes, i.e. the ColumnContent defaults declared below.
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


# These classes are for user-facing column names,
# to avoid having to change them all around the code
# when a modification is needed.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

## Leaderboard columns
# Init
auto_eval_column_dict = [
    ["eval_name", ColumnContent, ColumnContent("Eval Name", "str", True)],
    ["result_name", ColumnContent, ColumnContent("Result Name", "str", False)],
    ["date", ColumnContent, ColumnContent("Submission Date", "str", True)],
    ["miou", ColumnContent, ColumnContent("mIoU ⬆️", "number", True)],
    ["accuracy", ColumnContent, ColumnContent("Accuracy ⬆️", "number", False)],
    # ["precision_score", ColumnContent, ColumnContent("Precision ⬆️", "number", False)],
    # ["recall", ColumnContent, ColumnContent("Recall ⬆️", "number", False)],
    # ["f1", ColumnContent, ColumnContent("F1 ⬆️", "number", False)],
    # ["producer_accuracy", ColumnContent, ColumnContent("Producer Accuracy", "list", False)],
    # ["user_accuracy", ColumnContent, ColumnContent("User Accuracy", "list", False)],
    # ["confusion_matrix", ColumnContent, ColumnContent("Confusion Matrix", "matrix", False)],
    # ["num_classes", ColumnContent, ColumnContent("Number of classes", "number", False)],
]

# We use make_dataclass to dynamically build the column class from the score definitions above
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
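# For reference, the class generated above is roughly equivalent to hand-writing
# the following (sketch only; the real class comes from make_dataclass):
#
#     @dataclass(frozen=True)
#     class AutoEvalColumn:
#         eval_name: ColumnContent = ColumnContent("Eval Name", "str", True)
#         result_name: ColumnContent = ColumnContent("Result Name", "str", False)
#         date: ColumnContent = ColumnContent("Submission Date", "str", True)
#         miou: ColumnContent = ColumnContent("mIoU ⬆️", "number", True)
#         accuracy: ColumnContent = ColumnContent("Accuracy ⬆️", "number", False)
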
# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
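

if __name__ == "__main__":
    # Illustrative usage sketch only (assumption: the leaderboard app imports this
    # module and reads display names through AutoEvalColumn / COLS; nothing below
    # is required by the module itself).
    print(AutoEvalColumn.miou.name)   # -> "mIoU ⬆️"
    print(AutoEvalColumn.date.type)   # -> "str"
    # COLS holds every non-hidden column's display name, in declaration order.
    print(COLS)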