orionweller committed
Commit 77cc9e7 (1 parent: b5c28bd)

add bi-encoder button

Files changed (2)
  1. app.py +11 -3
  2. model_meta.yaml +4 -1
app.py CHANGED
@@ -44,12 +44,12 @@ TASK_DESCRIPTIONS["Overall"] = "Overall performance across MTEB tasks."
 SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS = {k for k,v in MODEL_META["model_meta"].items() if v.get("is_sentence_transformers_compatible", False)}
 MODELS_TO_SKIP = MODEL_META["models_to_skip"]
 CROSS_ENCODERS = MODEL_META["cross_encoders"]
+BI_ENCODERS = [k for k, _ in MODEL_META["model_meta"].items() if k not in CROSS_ENCODERS + ["bm25"]]
 
 PROPRIETARY_MODELS = {
     make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, f"https://huggingface.co/spaces/{REPO_ID}"))
     for model in PROPRIETARY_MODELS
 }
-
 SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS = {
     make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, f"https://huggingface.co/spaces/{REPO_ID}"))
     for model in SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS
@@ -58,6 +58,11 @@ CROSS_ENCODERS = {
     make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, f"https://huggingface.co/spaces/{REPO_ID}"))
     for model in CROSS_ENCODERS
 }
+BI_ENCODERS = {
+    make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, f"https://huggingface.co/spaces/{REPO_ID}"))
+    for model in BI_ENCODERS
+}
+
 
 TASK_TO_TASK_TYPE = {task_category: [] for task_category in TASKS}
 for board_config in BOARDS_CONFIG.values():
@@ -482,7 +487,8 @@ MODEL_TYPES = [
     "Open",
     "Proprietary",
     "Sentence Transformers",
-    "Cross Encoders"
+    "Cross-Encoders",
+    "Bi-Encoders"
 ]
 
 def filter_data(search_query, model_types, model_sizes, *full_dataframes):
@@ -506,8 +512,10 @@ def filter_data(search_query, model_types, model_sizes, *full_dataframes):
                     masks.append(df["Model"].isin(PROPRIETARY_MODELS))
                 elif model_type == "Sentence Transformers":
                     masks.append(df["Model"].isin(SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS))
-                elif model_type == "Cross Encoders":
+                elif model_type == "Cross-Encoders":
                     masks.append(df["Model"].isin(CROSS_ENCODERS))
+                elif model_type == "Bi-Encoders":
+                    masks.append(df["Model"].isin(BI_ENCODERS))
             if masks:
                 df = df[reduce(lambda a, b: a | b, masks)]
             else:
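
A minimal sketch (not part of the commit) of how the new "Bi-Encoders" option filters the leaderboard table: one boolean mask per selected model type, OR-ed together, as in the filter_data hunk above. All model names and scores here are hypothetical placeholders for the clickable names produced by make_clickable_model.

    from functools import reduce
    import pandas as pd

    # Hypothetical stand-ins for the sets that app.py builds from model_meta.yaml.
    CROSS_ENCODERS = {"example-cross-encoder"}
    BI_ENCODERS = {"example-bi-encoder-small", "example-bi-encoder-large"}

    df = pd.DataFrame({
        "Model": ["example-cross-encoder", "example-bi-encoder-small", "example-bi-encoder-large"],
        "Average": [55.0, 52.1, 58.4],  # toy scores, not real results
    })

    selected = ["Bi-Encoders"]  # what the new button toggles
    masks = []
    if "Cross-Encoders" in selected:
        masks.append(df["Model"].isin(CROSS_ENCODERS))
    if "Bi-Encoders" in selected:
        masks.append(df["Model"].isin(BI_ENCODERS))
    if masks:
        df = df[reduce(lambda a, b: a | b, masks)]
    print(df)  # only the two bi-encoder rows remain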
model_meta.yaml CHANGED
@@ -1306,4 +1306,7 @@ cross_encoders:
 - flan-t5-large
 - monobert-large-msmarco
 - monot5-3b-msmarco-10k
-- monot5-base-msmarco-10
+- monot5-base-msmarco-10k
+- llama-2-7b-chat
+- mistral-7b-instruct-v0.2
+- tart-full-flan-t5-xl
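
For reference, a small sketch (assuming the file is loaded with PyYAML and the keys match those shown in these diffs) of how the added app.py line derives BI_ENCODERS from model_meta.yaml, i.e. every model_meta entry that is neither listed under cross_encoders nor named bm25:

    import yaml  # PyYAML

    with open("model_meta.yaml") as f:
        MODEL_META = yaml.safe_load(f)

    CROSS_ENCODERS = MODEL_META["cross_encoders"]
    # Mirrors the new line 47 in app.py.
    BI_ENCODERS = [k for k, _ in MODEL_META["model_meta"].items() if k not in CROSS_ENCODERS + ["bm25"]]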