Muennighoff bryant1410 committed on
Commit
953a5fe
1 Parent(s): ef1ca24

Simplify the code and save some memory (#8)

Browse files

- Simplify the code and save some memory (c4bdf3be28eb0b9b204b04389b60d86a67f192e4)


Co-authored-by: Santiago Castro <bryant1410@users.noreply.huggingface.co>

Files changed (1) hide show
  1. results.py +14 -14
results.py CHANGED
@@ -1,6 +1,7 @@
1
  """MTEB Results"""
2
 
3
  import json
 
4
  import datasets
5
 
6
 
@@ -91,21 +92,21 @@ MODELS = [
91
  "xlm-roberta-large",
92
  ]
93
 
 
94
  # Needs to be run whenever new files are added
95
  def get_paths():
96
- import json, os
97
- files = {}
98
  for model_dir in os.listdir("results"):
99
  results_model_dir = os.path.join("results", model_dir)
100
- if not(os.path.isdir(results_model_dir)):
101
  print(f"Skipping {results_model_dir}")
102
  continue
103
  for res_file in os.listdir(results_model_dir):
104
  if res_file.endswith(".json"):
105
  results_model_file = os.path.join(results_model_dir, res_file)
106
- files.setdefault(model_dir, [])
107
  files[model_dir].append(results_model_file)
108
- with open(f"paths.json", "w") as f:
109
  json.dump(files, f)
110
  return files
111
 
@@ -113,7 +114,6 @@ def get_paths():
113
  class MTEBResults(datasets.GeneratorBasedBuilder):
114
  """MTEBResults"""
115
 
116
-
117
  BUILDER_CONFIGS = [
118
  datasets.BuilderConfig(
119
  name=model,
@@ -140,9 +140,9 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
140
 
141
  def _split_generators(self, dl_manager):
142
  path_file = dl_manager.download_and_extract(URL)
143
- with open(path_file, "r") as f:
144
  files = json.load(f)
145
-
146
  downloaded_files = dl_manager.download_and_extract(files[self.config.name])
147
  return [
148
  datasets.SplitGenerator(
@@ -153,12 +153,12 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
153
 
154
  def _generate_examples(self, filepath):
155
  """This function returns the examples in the raw (text) form."""
156
- logger.info("Generating examples from {}".format(filepath))
157
-
158
  out = []
159
 
160
  for path in filepath:
161
- with open(path, "r", encoding="utf-8") as f:
162
  res_dict = json.load(f)
163
  ds_name = res_dict["mteb_dataset_name"]
164
  split = "test"
@@ -168,16 +168,16 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
168
  print(f"Skipping {ds_name} as split {split} not present.")
169
  continue
170
  res_dict = res_dict.get(split)
171
- is_multilingual = True if any([x in res_dict for x in EVAL_LANGS]) else False
172
  langs = res_dict.keys() if is_multilingual else ["en"]
173
  for lang in langs:
174
  if lang in SKIP_KEYS: continue
175
  test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
176
- for (metric, score) in test_result_lang.items():
177
  if not isinstance(score, dict):
178
  score = {metric: score}
179
  for sub_metric, sub_score in score.items():
180
- if any([x in sub_metric for x in SKIP_KEYS]): continue
181
  out.append({
182
  "mteb_dataset_name": ds_name,
183
  "eval_language": lang if is_multilingual else "",
 
1
  """MTEB Results"""
2
 
3
  import json
4
+
5
  import datasets
6
 
7
 
 
92
  "xlm-roberta-large",
93
  ]
94
 
95
+
96
  # Needs to be run whenever new files are added
97
  def get_paths():
98
+ import collections, os
99
+ files = collections.defaultdict(list)
100
  for model_dir in os.listdir("results"):
101
  results_model_dir = os.path.join("results", model_dir)
102
+ if not os.path.isdir(results_model_dir):
103
  print(f"Skipping {results_model_dir}")
104
  continue
105
  for res_file in os.listdir(results_model_dir):
106
  if res_file.endswith(".json"):
107
  results_model_file = os.path.join(results_model_dir, res_file)
 
108
  files[model_dir].append(results_model_file)
109
+ with open("paths.json", "w") as f:
110
  json.dump(files, f)
111
  return files
112
 
 
114
  class MTEBResults(datasets.GeneratorBasedBuilder):
115
  """MTEBResults"""
116
 
 
117
  BUILDER_CONFIGS = [
118
  datasets.BuilderConfig(
119
  name=model,
 
140
 
141
  def _split_generators(self, dl_manager):
142
  path_file = dl_manager.download_and_extract(URL)
143
+ with open(path_file) as f:
144
  files = json.load(f)
145
+
146
  downloaded_files = dl_manager.download_and_extract(files[self.config.name])
147
  return [
148
  datasets.SplitGenerator(
 
153
 
154
  def _generate_examples(self, filepath):
155
  """This function returns the examples in the raw (text) form."""
156
+ logger.info(f"Generating examples from {filepath}")
157
+
158
  out = []
159
 
160
  for path in filepath:
161
+ with open(path, encoding="utf-8") as f:
162
  res_dict = json.load(f)
163
  ds_name = res_dict["mteb_dataset_name"]
164
  split = "test"
 
168
  print(f"Skipping {ds_name} as split {split} not present.")
169
  continue
170
  res_dict = res_dict.get(split)
171
+ is_multilingual = any(x in res_dict for x in EVAL_LANGS)
172
  langs = res_dict.keys() if is_multilingual else ["en"]
173
  for lang in langs:
174
  if lang in SKIP_KEYS: continue
175
  test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
176
+ for metric, score in test_result_lang.items():
177
  if not isinstance(score, dict):
178
  score = {metric: score}
179
  for sub_metric, sub_score in score.items():
180
+ if any(x in sub_metric for x in SKIP_KEYS): continue
181
  out.append({
182
  "mteb_dataset_name": ds_name,
183
  "eval_language": lang if is_multilingual else "",