Fabian Wolf committed on
Commit
63fdf20
·
1 Parent(s): cec432e

Change model types

Browse files
src/display/utils.py CHANGED
@@ -62,9 +62,9 @@ class ModelDetails:
62
 
63
 
64
  class ModelType(Enum):
65
- PT = ModelDetails(name="pretrained", symbol="🟒")
66
- FT = ModelDetails(name="fine-tuned", symbol="πŸ”Ά")
67
- IFT = ModelDetails(name="instruction-tuned", symbol="β­•")
68
  RL = ModelDetails(name="RL-tuned", symbol="🟦")
69
  Unknown = ModelDetails(name="", symbol="?")
70
 
@@ -73,13 +73,13 @@ class ModelType(Enum):
73
 
74
  @staticmethod
75
  def from_str(type):
76
- if "fine-tuned" in type or "πŸ”Ά" in type:
77
  return ModelType.FT
78
- if "pretrained" in type or "🟒" in type:
79
  return ModelType.PT
80
  if "RL-tuned" in type or "🟦" in type:
81
  return ModelType.RL
82
- if "instruction-tuned" in type or "β­•" in type:
83
  return ModelType.IFT
84
  return ModelType.Unknown
85
 
 
62
 
63
 
64
  class ModelType(Enum):
65
+ PT = ModelDetails(name="LLM", symbol="🟒")
66
+ FT = ModelDetails(name="Medical fine-tuned", symbol="πŸ”Ά")
67
+ IFT = ModelDetails(name="LLM with Vision", symbol="β­•")
68
  RL = ModelDetails(name="RL-tuned", symbol="🟦")
69
  Unknown = ModelDetails(name="", symbol="?")
70
 
 
73
 
74
  @staticmethod
75
  def from_str(type):
76
+ if "Medical fine-tuned" in type or "πŸ”Ά" in type:
77
  return ModelType.FT
78
+ if "LLM" in type or "🟒" in type:
79
  return ModelType.PT
80
  if "RL-tuned" in type or "🟦" in type:
81
  return ModelType.RL
82
+ if "LLM with Vision" in type or "β­•" in type:
83
  return ModelType.IFT
84
  return ModelType.Unknown
85
 
src/leaderboard/read_evals.py CHANGED
@@ -43,6 +43,8 @@ class EvalResult:
43
  # Precision
44
  precision = Precision.from_str(config.get("model_dtype"))
45
 
 
 
46
  self.num_params = int(config.get("model_params", 0))
47
 
48
  # Get model and org
@@ -59,14 +61,14 @@ class EvalResult:
59
  result_key = f"{org}_{model}_{precision.value.name}"
60
  full_model = "/".join(org_and_model)
61
 
62
- still_on_hub, _, model_config = is_model_on_hub(
63
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
64
- )
65
- architecture = "?"
66
- if model_config is not None:
67
- architectures = getattr(model_config, "architectures", None)
68
- if architectures:
69
- architecture = ";".join(architectures)
70
 
71
  # Extract results available in this file (some results are split in several files)
72
  results = {}
@@ -89,8 +91,8 @@ class EvalResult:
89
  results=results,
90
  precision=precision,
91
  revision= config.get("model_sha", ""),
92
- still_on_hub=still_on_hub,
93
- architecture=architecture
94
  )
95
 
96
  # def update_with_request_file(self, requests_path):
 
43
  # Precision
44
  precision = Precision.from_str(config.get("model_dtype"))
45
 
46
+ self.model_type = ModelType.from_str(config.get("model_type", "LLM"))
47
+
48
  self.num_params = int(config.get("model_params", 0))
49
 
50
  # Get model and org
 
61
  result_key = f"{org}_{model}_{precision.value.name}"
62
  full_model = "/".join(org_and_model)
63
 
64
+ # still_on_hub, _, model_config = is_model_on_hub(
65
+ # full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
66
+ # )
67
+ # architecture = "?"
68
+ # if model_config is not None:
69
+ # architectures = getattr(model_config, "architectures", None)
70
+ # if architectures:
71
+ # architecture = ";".join(architectures)
72
 
73
  # Extract results available in this file (some results are split in several files)
74
  results = {}
 
91
  results=results,
92
  precision=precision,
93
  revision= config.get("model_sha", ""),
94
+ still_on_hub=self.still_on_hub,
95
+ architecture=self.architecture
96
  )
97
 
98
  # def update_with_request_file(self, requests_path):