shigeki Ishida committed
Commit e1d8ae4 • 1 Parent(s): bbe855d

add vllm version

Files changed (3)
  1. src/display/utils.py +11 -0
  2. src/envs.py +3 -0
  3. src/submission/submit.py +10 -4
src/display/utils.py CHANGED
@@ -4,6 +4,7 @@ from enum import Enum
 import pandas as pd
 
 from src.about import Tasks, TaskType
+from src.envs import VLLM_CURRENT_VERSION
 
 
 def fields(raw_class):
@@ -169,6 +170,16 @@ class Backend(Enum):
         return Backend.Unknown
 
 
+class VllmVersion(Enum):
+    current = ModelDetails(VLLM_CURRENT_VERSION)
+    Unknown = ModelDetails("?")
+
+    def from_str(version):
+        if version == VLLM_CURRENT_VERSION:
+            return VllmVersion.current
+        return VllmVersion.Unknown
+
+
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
 TYPES = [c.type for c in fields(AutoEvalColumn)]
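
The new VllmVersion enum follows the same convention as the file's other enums (Backend, Version shown in the hunk headers): from_str is written without self or @staticmethod and is called through the class, so the argument arrives as the raw version string. A minimal self-contained sketch of that pattern; the one-field ModelDetails dataclass here is a stand-in assumption for the one defined elsewhere in src/display/utils.py:

    from dataclasses import dataclass
    from enum import Enum

    VLLM_CURRENT_VERSION = "v0.6.3"  # pinned in src/envs.py


    @dataclass
    class ModelDetails:  # stand-in for the dataclass in src/display/utils.py
        name: str


    class VllmVersion(Enum):
        current = ModelDetails(VLLM_CURRENT_VERSION)
        Unknown = ModelDetails("?")

        # A plain function in an Enum body is not turned into a member;
        # looked up on the class it stays a bare function, so
        # VllmVersion.from_str("...") binds the string to `version`.
        def from_str(version):
            if version == VLLM_CURRENT_VERSION:
                return VllmVersion.current
            return VllmVersion.Unknown


    print(VllmVersion.from_str("v0.6.3"))  # VllmVersion.current
    print(VllmVersion.from_str("v0.4.0"))  # VllmVersion.Unknown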
src/envs.py CHANGED
@@ -22,4 +22,7 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
 EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
 EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
+# vllm version
+VLLM_CURRENT_VERSION = "v0.6.3"
+
 API = HfApi(token=TOKEN)
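
With this change envs.py is the single place the pinned runtime version lives; the display enum above and submit.py below both import the constant instead of repeating the string. A hypothetical sanity check, assuming the module paths used in this repo:

    from src.envs import VLLM_CURRENT_VERSION
    from src.display.utils import VllmVersion

    assert VllmVersion.from_str(VLLM_CURRENT_VERSION) is VllmVersion.current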
src/submission/submit.py CHANGED
@@ -4,7 +4,7 @@ from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
 from src.display.utils import Version
-from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
+from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN, VLLM_CURRENT_VERSION
 from src.submission.check_validity import already_submitted_models, check_model_card, get_model_size, is_model_on_hub
 
 REQUESTED_MODELS = None
@@ -24,11 +24,14 @@ def add_new_eval(
     REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
 
     current_version = Version.v1_4_1.value.name
+    current_vllm_version = VLLM_CURRENT_VERSION
 
     # Duplicate check that includes version information
-    submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}"
+    submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}_{current_vllm_version}"
     if submission_id in REQUESTED_MODELS:
-        return styled_warning(f"This model has already been evaluated with llm-jp-eval version {current_version}")
+        return styled_warning(
+            f"This model has already been evaluated with llm-jp-eval version {current_version} and vllm version {current_vllm_version}"
+        )
 
     user_name = ""
     model_path = model
@@ -85,6 +88,7 @@ def add_new_eval(
         "private": False,
         "add_special_tokens": add_special_tokens,
         "llm_jp_eval_version": current_version,
+        "vllm_version": current_vllm_version,
     }
 
     # Check for duplicate submission
@@ -94,7 +98,9 @@ def add_new_eval(
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}.json"
+    out_path = (
+        f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{add_special_tokens}_{current_vllm_version}.json"
+    )
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
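
Net effect on the submission flow: the duplicate-check key, the saved request entry, and the request-file name all carry the vllm version, so bumping VLLM_CURRENT_VERSION in envs.py lets an already-evaluated model be resubmitted under the new runtime instead of being rejected as a duplicate. A sketch with made-up form values; the rendering of Version.v1_4_1.value.name as "1.4.1" is an assumption:

    # Illustrative values only; the real ones come from the submission form.
    model = "my-org/my-model"
    precision = "bfloat16"
    add_special_tokens = "True"
    current_version = "1.4.1"        # assumed value of Version.v1_4_1.value.name
    current_vllm_version = "v0.6.3"  # VLLM_CURRENT_VERSION

    submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}_{current_vllm_version}"
    print(submission_id)
    # my-org/my-model_bfloat16_True_1.4.1_v0.6.3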