Alina Lozovskaia committed
Commit 2293858
1 Parent(s): e34e357

Updated collections.py

Files changed (2)
  1. src/populate.py +0 -3
  2. src/tools/collections.py +48 -53
src/populate.py CHANGED
@@ -15,12 +15,9 @@ def get_leaderboard_df(
     raw_data = get_raw_eval_results(results_path=results_path, requests_path=requests_path, dynamic_path=dynamic_path)
     all_data_json = [v.to_dict() for v in raw_data]
     all_data_json.append(baseline_row)
-    # print([data for data in all_data_json if data["model_name_for_query"] == "databricks/dbrx-base"])
     filter_models_flags(all_data_json)
 
     df = pd.DataFrame.from_records(all_data_json)
-    # print(df.columns)
-    # print(df[df["model_name_for_query"] == "databricks/dbrx-base"])
     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
     df = df[cols].round(decimals=2)
 
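For reference, the logic this hunk keeps is a plain records-to-frame pipeline: build a DataFrame from the per-model dicts, sort by the average score, select the display columns, and round. A minimal sketch of that pattern with made-up records and column names (the real rows come from `get_raw_eval_results` and the real column names from `AutoEvalColumn`):

```python
import pandas as pd

# Hypothetical stand-ins for all_data_json and the selected columns.
records = [
    {"model": "model-a", "average": 71.2345, "arc": 68.11},
    {"model": "model-b", "average": 74.5619, "arc": 70.98},
]
cols = ["model", "average", "arc"]

df = pd.DataFrame.from_records(records)
df = df.sort_values(by=["average"], ascending=False)  # best average first
df = df[cols].round(decimals=2)                       # keep display columns, 2 decimals
print(df)
```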
src/tools/collections.py CHANGED
@@ -17,65 +17,60 @@ intervals = {
 }
 
 
-def update_collections(df: DataFrame):
-    """This function updates the Open LLM Leaderboard model collection with the latest best models for
-    each size category and type.
-    """
-    collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)
-    params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
-
-    cur_best_models = []
-
-    ix = 0
-    for type in ModelType:
-        if type.value.name == "":
-            continue
-        for size in intervals:
-            # We filter the df to gather the relevant models
-            type_emoji = [t[0] for t in type.value.symbol]
-            filtered_df = df[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
-
-            numeric_interval = pd.IntervalIndex([intervals[size]])
-            mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
-            filtered_df = filtered_df.loc[mask]
-
-            best_models = list(
-                filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)[AutoEvalColumn.dummy.name]
-            )
-            print(type.value.symbol, size, best_models[:10])
-
-            # We add them one by one to the leaderboard
-            for model in best_models:
-                ix += 1
-                cur_len_collection = len(collection.items)
-                try:
-                    collection = add_collection_item(
-                        PATH_TO_COLLECTION,
-                        item_id=model,
-                        item_type="model",
-                        exists_ok=True,
-                        note=f"Best {type.to_str(' ')} model of around {size} on the leaderboard today!",
-                        token=H4_TOKEN,
-                    )
-                    if (
-                        len(collection.items) > cur_len_collection
-                    ):  # we added an item - we make sure its position is correct
-                        item_object_id = collection.items[-1].item_object_id
-                        update_collection_item(
-                            collection_slug=PATH_TO_COLLECTION, item_object_id=item_object_id, position=ix
-                        )
-                        cur_len_collection = len(collection.items)
-                    cur_best_models.append(model)
-                    break
-                except HfHubHTTPError:
-                    continue
-
-    collection = get_collection(PATH_TO_COLLECTION, token=H4_TOKEN)
-    for item in collection.items:
-        if item.item_id not in cur_best_models:
-            try:
-                delete_collection_item(
-                    collection_slug=PATH_TO_COLLECTION, item_object_id=item.item_object_id, token=H4_TOKEN
-                )
-            except HfHubHTTPError:
-                continue
+def filter_by_type_and_size(df, model_type, size_interval):
+    """Filter DataFrame by model type and parameter size interval."""
+    type_emoji = model_type.value.symbol[0]
+    filtered_df = df[df[AutoEvalColumn.model_type_symbol.name] == type_emoji]
+    params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
+    mask = params_column.apply(lambda x: x in size_interval)
+    return filtered_df.loc[mask]
+
+
+def add_models_to_collection(collection, models, model_type, size):
+    """Add best models to the collection and update positions."""
+    cur_len_collection = len(collection.items)
+    for ix, model in enumerate(models, start=1):
+        try:
+            collection = add_collection_item(
+                PATH_TO_COLLECTION,
+                item_id=model,
+                item_type="model",
+                exists_ok=True,
+                note=f"Best {model_type.to_str(' ')} model of around {size} on the leaderboard today!",
+                token=H4_TOKEN,
+            )
+            # Ensure position is correct if item was added
+            if len(collection.items) > cur_len_collection:
+                item_object_id = collection.items[-1].item_object_id
+                update_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_object_id, position=ix)
+                cur_len_collection = len(collection.items)
+            break  # assuming we only add the top model
+        except HfHubHTTPError:
+            continue
+
+
+def update_collections(df: DataFrame):
+    """Update collections by filtering and adding the best models."""
+    collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)
+    cur_best_models = []
+
+    for model_type in ModelType:
+        if not model_type.value.name:
+            continue
+        for size, interval in intervals.items():
+            filtered_df = filter_by_type_and_size(df, model_type, interval)
+            best_models = list(
+                filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)[AutoEvalColumn.dummy.name][:10]
+            )
+            print(model_type.value.symbol, size, best_models[:10])
+            add_models_to_collection(collection, best_models, model_type, size)
+            cur_best_models.extend(best_models)
+
+    # Cleanup
+    existing_models = {item.item_id for item in collection.items}
+    to_remove = existing_models - set(cur_best_models)
+    for item_id in to_remove:
+        try:
+            delete_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_id, token=H4_TOKEN)
+        except HfHubHTTPError:
+            continue
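The functional core of the new `filter_by_type_and_size` helper is the switch from building a `pd.IntervalIndex` per size bucket to testing membership directly against the `pd.Interval` stored in `intervals`. A minimal, self-contained sketch of that membership pattern; the interval bounds, column name, and parameter counts below are hypothetical, not the leaderboard's real schema:

```python
import pandas as pd

# Hypothetical size bucket, standing in for one entry of the real `intervals` dict.
size_interval = pd.Interval(3, 7.3, closed="right")  # roughly a "~7B parameters" bucket

# Hypothetical leaderboard slice: parameter counts in billions, possibly non-numeric.
df = pd.DataFrame({
    "model": ["tiny-model", "seven-b-model", "unknown-size-model"],
    "#Params (B)": [1.1, 6.9, "?"],
})

# Same pattern as filter_by_type_and_size: coerce to numeric, then test interval membership.
params_column = pd.to_numeric(df["#Params (B)"], errors="coerce")
mask = params_column.apply(lambda x: x in size_interval)  # NaN falls outside every interval
print(df.loc[mask])  # keeps only "seven-b-model"
```

The `errors="coerce"` step keeps the mask well defined when a parameter count is missing or non-numeric, since the resulting NaN is never contained in an interval; this sketch only covers the filtering, not the Hub collection calls.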