Files changed (2)
  1. app.py +18 -2
  2. src/leaderboard/filter_models.py +0 -1
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import time
 import logging
 import gradio as gr
 import pandas as pd
@@ -48,6 +49,8 @@ from src.submission.submit import add_new_eval
 from src.tools.collections import update_collections
 from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df
 
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Start ephemeral Spaces on PRs (see config in README.md)
 enable_space_ci()
@@ -56,13 +59,23 @@ enable_space_ci()
 def restart_space():
     API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
 
-
+def time_diff_wrapper(func):
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        diff = end_time - start_time
+        logging.info(f"Time taken for {func.__name__}: {diff} seconds")
+        return result
+    return wrapper
+
+@time_diff_wrapper
 def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3):
     """Attempt to download dataset with retries."""
     attempt = 0
     while attempt < max_attempts:
         try:
-            print(f"Downloading {repo_id} to {local_dir}")
+            logging.info(f"Downloading {repo_id} to {local_dir}")
             snapshot_download(
                 repo_id=repo_id,
                 local_dir=local_dir,
@@ -84,7 +97,10 @@ def init_space(full_init: bool = True):
     if full_init:
         # These downloads only occur on full initialization
         download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
+        download_dataset(QUEUE_REPO, EVAL_REQUESTS_PATH)
         download_dataset(DYNAMIC_INFO_REPO, DYNAMIC_INFO_PATH)
+        download_dataset(DYNAMIC_INFO_REPO, DYNAMIC_INFO_PATH)
+        download_dataset(RESULTS_REPO, EVAL_RESULTS_PATH)
         download_dataset(RESULTS_REPO, EVAL_RESULTS_PATH)
 
     # Always retrieve the leaderboard DataFrame
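Review note on the timing decorator introduced above: it wraps a callable, times each call, and logs the duration under the callee's __name__. A minimal self-contained sketch of the same pattern follows; the functools.wraps line and the slow_op stand-in are illustrative additions, not part of this diff (wraps keeps __name__ accurate, which the log message relies on, even when the function is further decorated or inspected).

import functools
import logging
import time

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def time_diff_wrapper(func):
    @functools.wraps(func)  # illustrative addition: preserves func.__name__ and docstring
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        diff = time.time() - start_time
        logging.info("Time taken for %s: %.3f seconds", func.__name__, diff)
        return result
    return wrapper

@time_diff_wrapper
def slow_op():
    """Hypothetical stand-in for download_dataset."""
    time.sleep(0.2)

slow_op()  # logs something like: ... - INFO - Time taken for slow_op: 0.200 seconds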
 
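The download_dataset hunk is cut off before its except branch, so the retry handling itself is not visible in this diff. The visible skeleton (an attempt counter plus while attempt < max_attempts) implies a bounded retry loop; here is a generic sketch under assumptions: the broad Exception catch and the exponential backoff are guesses, not shown above.

import logging
import time

def with_retries(operation, max_attempts=3):
    """Bounded retry loop; a sketch of the pattern, not this PR's exact code."""
    attempt = 0
    while attempt < max_attempts:
        try:
            return operation()
        except Exception as exc:  # assumption: the real code may catch narrower errors
            attempt += 1
            logging.warning("Attempt %d/%d failed: %s", attempt, max_attempts, exc)
            time.sleep(2 ** attempt)  # assumed backoff; not part of the visible hunk
    raise RuntimeError(f"Operation failed after {max_attempts} attempts")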
src/leaderboard/filter_models.py CHANGED
@@ -138,7 +138,6 @@ def flag_models(leaderboard_data: list[dict]):
         else:
             flag_key = model_data[AutoEvalColumn.fullname.name]
 
-        print(f"model check: {flag_key}")
         if flag_key in FLAGGED_MODELS:
             print(f"Flagged model: {flag_key}")
             issue_num = FLAGGED_MODELS[flag_key].split("/")[-1]
 
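For context on the retained branch: FLAGGED_MODELS maps a model key to the URL of its discussion thread, and split("/")[-1] takes the trailing path segment as the issue number. A small sketch with a hypothetical entry; the model name and URL here are invented for illustration.

# Hypothetical entry; the real FLAGGED_MODELS mapping lives in the leaderboard source
FLAGGED_MODELS = {
    "some-org/some-model": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/123",
}

flag_key = "some-org/some-model"
if flag_key in FLAGGED_MODELS:
    print(f"Flagged model: {flag_key}")
    issue_num = FLAGGED_MODELS[flag_key].split("/")[-1]  # "123"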