AppleSwing committed on
Commit bc48941 • Parent(s): 22ce8a7

Add choices for GPU and solve leaderboard issue

app.py CHANGED
@@ -217,7 +217,7 @@ with demo:
     search_bar = gr.Textbox(
         placeholder=" 🔍 Model search (separate multiple queries with `;`)",
         show_label=False,
-        elem_id="search-bar",
+        elem_id="search-bar"
     )
     with gr.Row():
         shown_columns = gr.CheckboxGroup(
@@ -306,14 +306,14 @@ with demo:
             filter_columns_size,
             search_bar,
         ],
-        leaderboard_table,
+        leaderboard_table
     )
 
     # Check query parameter once at startup and update search bar
-    # demo.load(load_query, inputs=[], outputs=[search_bar])
+    demo.load(load_query, inputs=[], outputs=[search_bar])
 
     for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size]:
-        selector.change(
+        selector.select(
             update_table,
             [
                 hidden_leaderboard_table_for_search,
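
Note on the app.py changes: re-enabling demo.load means the app reads a URL query parameter once at page load and pre-fills the search bar, and switching the filter widgets from .change to .select means update_table fires only on user interaction rather than on every programmatic value update (a plausible cause of the leaderboard issue named in the commit title, since .change listeners in Gradio also trigger when a value is set by another callback). The load_query function itself is not part of this diff; the sketch below is a hypothetical implementation using Gradio's gr.Request, shown only to illustrate the wiring.

import gradio as gr

def load_query(request: gr.Request):
    # Hypothetical: opening <space-url>/?query=llama-2;mistral would pre-fill
    # the search bar with "llama-2;mistral".
    return request.query_params.get("query", "")

with gr.Blocks() as demo:
    search_bar = gr.Textbox(show_label=False, elem_id="search-bar")
    # Runs once per page load; the return value populates the search bar.
    demo.load(load_query, inputs=[], outputs=[search_bar])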
backend-cli.py CHANGED
@@ -16,13 +16,13 @@ from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PAT
 from src.backend.manage_requests import EvalRequest
 from src.leaderboard.read_evals import EvalResult
 
-from src.envs import QUEUE_REPO, RESULTS_REPO, API
+from src.envs import QUEUE_REPO, RESULTS_REPO, API, DEBUG_QUEUE_REPO
 from src.utils import my_snapshot_download, analyze_gpu_stats, parse_nvidia_smi, monitor_gpus
 
 from src.leaderboard.read_evals import get_raw_eval_results
 
 from typing import Optional
-
+import GPUtil
 import time
 
 import pprint
@@ -364,9 +364,22 @@ def maybe_refresh_results(thr: int, hard_task_lst: Optional[list[str]] = None) -
     return False
 
 
+def get_gpu_details():
+    gpus = GPUtil.getGPUs()
+    gpu = gpus[0]
+    name = gpu.name.replace(" ", "-")
+    # Convert memory from MB to GB and round to nearest whole number
+    memory_gb = round(gpu.memoryTotal / 1024)
+    memory = f"{memory_gb}GB"
+    formatted_name = f"{name}-{memory}"
+    return formatted_name
+
 def process_pending_requests() -> bool:
+    if args.debug:
+        QUEUE_REPO = DEBUG_QUEUE_REPO
+
     sanity_checks()
-
+    print("Processing pending requests")
     current_pending_status = [PENDING_STATUS]
 
     # Get all eval request that are PENDING, if you want to run other evals, change this parameter
@@ -385,6 +398,12 @@ def process_pending_requests() -> bool:
 
     eval_request = eval_requests[0]
     pp.pprint(eval_request)
+
+    gpu_type = eval_request.gpu_type
+    curr_gpu_type = get_gpu_details()
+    if gpu_type != curr_gpu_type:
+        print(f"GPU type mismatch: {gpu_type} vs {curr_gpu_type}")
+        return False
 
     my_snapshot_download(
         repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60
@@ -426,6 +445,7 @@ def get_args():
     parser.add_argument("--precision", type=str, default="float32,float16,8bit,4bit", help="Precision to debug")
     parser.add_argument("--inference-framework", type=str, default="hf-chat", help="Inference framework to debug")
    parser.add_argument("--limit", type=int, default=None, help="Limit for the number of samples")
+    parser.add_argument("--gpu-type", type=str, default="NVIDIA-A100-PCIe-80GB", help="GPU type")
     return parser.parse_args()
 
 
@@ -454,7 +474,8 @@ if __name__ == "__main__":
                 status="",
                 json_filepath="",
                 precision=precision,  # Use precision from arguments
-                inference_framework=args.inference_framework  # Use inference framework from arguments
+                inference_framework=args.inference_framework,  # Use inference framework from arguments
+                gpu_type=args.gpu_type
             )
             results = process_evaluation(task, eval_request, limit=args.limit)
         except Exception as e:
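
With this change the backend only picks up requests whose gpu_type matches the GPU it is actually running on. A worked example of the string get_gpu_details() builds, assuming GPUtil reports an RTX A5000 as "NVIDIA RTX A5000" with roughly 24564 MB of memory (driver-reported names vary, so the values are illustrative):

name = "NVIDIA RTX A5000".replace(" ", "-")   # "NVIDIA-RTX-A5000"
memory_gb = round(24564 / 1024)               # memoryTotal is reported in MB -> 24
formatted_name = f"{name}-{memory_gb}GB"      # "NVIDIA-RTX-A5000-24GB"

This is presumably why the A5000 entry in GPUType (further below) gains the RTX prefix: the choice stored in a request has to compare equal to this detected string, otherwise process_pending_requests() prints the mismatch and returns False.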
src/backend/manage_requests.py CHANGED
@@ -28,6 +28,7 @@ class EvalRequest:
     params: Optional[int] = None
     license: Optional[str] = ""
     batch_size: Optional[int] = 1
+    gpu_type: Optional[str] = "NVIDIA-A100-PCIe-80GB"
 
     def get_model_args(self) -> str:
         model_args = f"pretrained={self.model},revision={self.revision},parallelize=True"  # ,max_length=4096"
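
gpu_type is added with a default rather than as a required field. Assuming EvalRequest is a dataclass (as the field-with-default syntax suggests), request files written before this change, which carry no gpu_type key, still deserialize and are simply treated as A100 jobs by the backend check above. A minimal sketch of that fallback:

from dataclasses import fields

# Hypothetical inspection of the new field, just to show the default that
# pre-existing requests fall back to.
gpu_field = next(f for f in fields(EvalRequest) if f.name == "gpu_type")
print(gpu_field.default)  # "NVIDIA-A100-PCIe-80GB"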
src/display/utils.py CHANGED
@@ -193,7 +193,7 @@ class InferenceFramework(Enum):
 class GPUType(Enum):
     H100_pcie = ModelDetails("NVIDIA-H100-PCIe-80GB")
     A100_pcie = ModelDetails("NVIDIA-A100-PCIe-80GB")
-    A5000 = ModelDetails("NVIDIA-A5000-24GB")
+    A5000 = ModelDetails("NVIDIA-RTX-A5000-24GB")
     Unknown = ModelDetails("?")
 
     def to_str(self):
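
Renaming the A5000 entry keeps the displayed choice in sync with the name the backend detects at runtime. The submission dropdown itself is not part of this diff; the sketch below only illustrates how the enum could feed the GPU choices, assuming to_str() returns the display string.

import gradio as gr

# Hypothetical dropdown built from the enum; the Unknown ("?") entry is excluded.
gpu_choices = [gpu.to_str() for gpu in GPUType if gpu != GPUType.Unknown]
gpu_type_dropdown = gr.Dropdown(
    choices=gpu_choices,
    value="NVIDIA-A100-PCIe-80GB",
    label="GPU type",
)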
src/submission/submit.py CHANGED
@@ -115,7 +115,7 @@ def add_new_eval(
         "params": model_size,
         "license": license,
         "inference_framework": inference_framework,
-        "GPU_type": gpu_type
+        "gpu_type": gpu_type
     }
 
     # Check for duplicate submission
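
The key rename from "GPU_type" to "gpu_type" makes the submitted entry's key match the EvalRequest field name, so the selected GPU presumably reaches the backend's gpu_type check instead of being dropped or left at the default. An illustrative entry after this change (values are hypothetical):

entry = {
    "params": 7.24,
    "license": "apache-2.0",
    "inference_framework": "hf-chat",
    "gpu_type": "NVIDIA-RTX-A5000-24GB",  # was "GPU_type", which does not match the EvalRequest.gpu_type field
}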