t0-0 committed on
Commit 3d26ec8 · 2 Parent(s): b246f21 14d8fbd

Merge remote-tracking branch 'origin/main'

Files changed (4)
  1. src/display/utils.py +11 -0
  2. src/envs.py +3 -0
  3. src/submission/submit.py +10 -4
  4. style.css +4 -2
src/display/utils.py CHANGED
@@ -4,6 +4,7 @@ from enum import Enum
 import pandas as pd
 
 from src.about import Tasks, TaskType
+from src.envs import VLLM_CURRENT_VERSION
 
 
 def fields(raw_class):
@@ -169,6 +170,16 @@ class Backend(Enum):
         return Backend.Unknown
 
 
+class VllmVersion(Enum):
+    current = ModelDetails(VLLM_CURRENT_VERSION)
+    Unknown = ModelDetails("?")
+
+    def from_str(version):
+        if version == VLLM_CURRENT_VERSION:
+            return VllmVersion.current
+        return VllmVersion.Unknown
+
+
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
 TYPES = [c.type for c in fields(AutoEvalColumn)]
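For context, the new VllmVersion enum follows the same pattern as the existing Backend/Version enums in this file: each member wraps a ModelDetails value, and from_str maps a raw version string onto a member, falling back to Unknown. Below is a minimal standalone sketch of that lookup; the ModelDetails stand-in and the @staticmethod decorator are additions for the sketch only, and the pinned version value is assumed from src/envs.py.

```python
# Standalone sketch of the from_str lookup pattern (not the repo's code verbatim).
from dataclasses import dataclass
from enum import Enum

VLLM_CURRENT_VERSION = "v0.6.3"  # assumed value, mirrors src/envs.py


@dataclass
class ModelDetails:
    # Stand-in for the ModelDetails dataclass defined earlier in utils.py.
    name: str


class VllmVersion(Enum):
    current = ModelDetails(VLLM_CURRENT_VERSION)
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(version: str) -> "VllmVersion":
        # Anything other than the pinned version is treated as Unknown.
        if version == VLLM_CURRENT_VERSION:
            return VllmVersion.current
        return VllmVersion.Unknown


print(VllmVersion.from_str("v0.6.3").value.name)  # -> v0.6.3
print(VllmVersion.from_str("v0.5.0").value.name)  # -> ?
```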
src/envs.py CHANGED
@@ -22,4 +22,7 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
 EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
 EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
+# vllm version
+VLLM_CURRENT_VERSION = "v0.6.3"
+
 API = HfApi(token=TOKEN)
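The pinned vllm version lives in a single module-level constant, so a later upgrade is a one-line change in src/envs.py that flows into both the display enum and the submission dedup key. A hypothetical consumer of that constant might look like the sketch below; the function and dict here are illustrative, not from the repo.

```python
# Hypothetical consumer of the pinned version; in the Space this value
# would be imported from src.envs rather than defined inline.
VLLM_CURRENT_VERSION = "v0.6.3"


def is_current_vllm(eval_entry: dict) -> bool:
    # "vllm_version" is the field this commit adds to each eval request.
    return eval_entry.get("vllm_version") == VLLM_CURRENT_VERSION


print(is_current_vllm({"vllm_version": "v0.6.3"}))  # True
print(is_current_vllm({"vllm_version": "v0.5.4"}))  # False
```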
src/submission/submit.py CHANGED
@@ -4,7 +4,7 @@ from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
 from src.display.utils import Version
-from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
+from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN, VLLM_CURRENT_VERSION
 from src.submission.check_validity import already_submitted_models, check_model_card, get_model_size, is_model_on_hub
 
 REQUESTED_MODELS = None
@@ -24,11 +24,14 @@ def add_new_eval(
     REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
 
     current_version = Version.v1_4_1.value.name
+    current_vllm_version = VLLM_CURRENT_VERSION
 
     # Duplicate check including version information
-    submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}"
+    submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}_{current_vllm_version}"
     if submission_id in REQUESTED_MODELS:
-        return styled_warning(f"This model has already been evaluated with llm-jp-eval version {current_version}")
+        return styled_warning(
+            f"This model has already been evaluated with llm-jp-eval version {current_version} and vllm version {current_vllm_version}"
+        )
 
     user_name = ""
     model_path = model
@@ -93,6 +96,7 @@ def add_new_eval(
         "add_special_tokens": add_special_tokens,
         "llm_jp_eval_version": current_version,
         "architecture": architecture,
+        "vllm_version": current_vllm_version,
     }
 
     # Check for duplicate submission
@@ -102,7 +106,9 @@ def add_new_eval(
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}.json"
+    out_path = (
+        f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}_{current_vllm_version}.json"
+    )
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
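To illustrate the effect on the dedup key and the request filename, here is a small sketch with made-up submission values; only the two f-string formats mirror the diff above, and the version labels and paths are assumptions.

```python
# Hypothetical submission values; only the f-string formats come from the diff.
EVAL_REQUESTS_PATH = "eval-queue"   # placeholder for the real cache path
user_name = "someuser"              # placeholder; the repo derives this from the model id
model = "example-7b"
model_path = model
precision = "float16"
add_special_tokens = "True"
current_version = "v1.4.1"          # assumed llm-jp-eval version label
current_vllm_version = "v0.6.3"     # VLLM_CURRENT_VERSION from src/envs.py

# New dedup key: a vllm upgrade no longer collides with an older request
# for the same model/precision/llm-jp-eval version.
submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}_{current_vllm_version}"
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
out_path = (
    f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}_{current_vllm_version}.json"
)
print(submission_id)  # example-7b_float16_True_v1.4.1_v0.6.3
print(out_path)       # eval-queue/someuser/example-7b_eval_request_False_float16_True_v0.6.3.json
```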
style.css CHANGED
@@ -109,7 +109,9 @@ table th:first-child {
   min-width: 200px!important;
 }
 
-/* Hide the modebar of Plotly */
+/* make the plotly modebar horizontal */
 .modebar-group {
-  display: none;
+  display: flex;
+  flex-direction: row;
+  align-items: center;
 }