hSterz committed
Commit f2620b7 · Parent: 19fe2de

Remove columns we don't want

Files changed (3):
  1. app.py +2 -15
  2. src/display/utils.py +0 -6
  3. src/leaderboard/read_evals.py +1 -8
app.py CHANGED
@@ -68,22 +68,9 @@ def init_leaderboard(dataframe):
             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
+        search_columns=[AutoEvalColumn.model.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
+        filter_columns=[],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
     )
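After this change the leaderboard is searchable only by model name and has no filter widgets. A minimal sketch of what the resulting init_leaderboard looks like, assuming the standard gradio_leaderboard-based leaderboard template (Leaderboard and SelectColumns from the gradio_leaderboard package, AutoEvalColumn and the fields helper from src/display/utils.py); everything outside the hunk above is reconstructed, not taken from this diff:

from gradio_leaderboard import Leaderboard, SelectColumns

from src.display.utils import AutoEvalColumn, fields  # template helpers (assumed, not shown in this diff)


def init_leaderboard(dataframe):
    # Guard against an empty results frame before building the component
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.model.name],  # license search removed by this commit
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[],  # type/precision/params/hub-status filters removed by this commit
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )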
src/display/utils.py CHANGED
@@ -32,12 +32,6 @@ for task in Tasks:
 # Model information
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
 auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
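The surviving appends still flow into the make_dataclass call referenced by the trailing comment. A self-contained sketch of that mechanism, assuming the template's ColumnContent definition (reproduced here as an assumption; only the three model-information columns kept by this commit are shown):

from dataclasses import dataclass, make_dataclass


@dataclass(frozen=True)
class ColumnContent:
    name: str                   # column header shown in the leaderboard UI
    type: str                   # "str", "number", "markdown", "bool", ...
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False


# Model-information entries kept by this commit
auto_eval_column_dict = [
    ["model_type", ColumnContent, ColumnContent("Type", "str", False)],
    ["architecture", ColumnContent, ColumnContent("Architecture", "str", False)],
    ["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)],
]

# Each [attribute_name, type, default] triple becomes a field of a dynamically
# built frozen dataclass, so a column is addressed as AutoEvalColumn.<attr>.name.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.architecture.name)  # Architecture
print(AutoEvalColumn.revision.name)      # Model sha

Dropping the six appends is what removes the weight_type, precision, license, params, likes and still_on_hub attributes from AutoEvalColumn, which is why the other two files in this commit stop referencing them.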
src/leaderboard/read_evals.py CHANGED
@@ -112,18 +112,10 @@ class EvalResult:
         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
         data_dict = {
             "eval_name": self.eval_name,  # not a column, just a save name,
-            AutoEvalColumn.precision.name: self.precision.value.name,
-            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
             AutoEvalColumn.revision.name: self.revision,
             AutoEvalColumn.average.name: average,
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
         }
 
         for task in Tasks:
@@ -177,6 +169,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
         # Creation of result
         eval_result = EvalResult.init_from_json_file(model_result_filepath)
         # eval_result.update_with_request_file(requests_path)
+        print(eval_result)
 
         # Store results of same eval together
         eval_name = eval_result.eval_name
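With the precision, model-type, weight-type, license, likes, params and hub-status keys gone, EvalResult.to_dict now emits only the model, architecture, revision, average and per-task columns; the new print(eval_result) in get_raw_eval_results simply logs each parsed result as it is collected. A sketch of the trimmed method, assuming the standard template's helpers (make_clickable_model from src/display/formatting, AutoEvalColumn and Tasks from src/display/utils; the surrounding EvalResult class is elided):

    # Method of the EvalResult dataclass (class body elided; imports assumed from the template)
    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name
            AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
            AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average.name: average,
        }

        # One score column per task, keyed by the task's display column name
        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict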