pminervini committed on
Commit
bef4eff
1 Parent(s): 18bd323
Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -60,9 +60,6 @@ def init_space():
     dataset_df = get_dataset_summary_table(file_path='blog/Hallucination-Leaderboard-Summary.csv')
 
     if socket.gethostname() not in {'neuromancer'}:
-        if (os.path.exists(EVAL_REQUESTS_PATH) and
-            os.path.exists(EVAL_RESULTS_PATH) and
-            (datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(EVAL_REQUESTS_PATH))).days > 1):
             ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
             ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
 
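The guard removed above made the two snapshot downloads conditional: they ran only when both local paths already existed and the requests snapshot was more than a day old. A minimal standalone sketch of that staleness-check pattern, assuming huggingface_hub's snapshot_download (which app.py wraps as ui_snapshot_download); the helper name and max_age_days parameter are hypothetical, and unlike the removed guard this version also downloads when the local copy is missing:

```python
import datetime
import os

from huggingface_hub import snapshot_download  # wrapped as ui_snapshot_download in app.py


def refresh_if_stale(repo_id: str, local_dir: str, max_age_days: int = 1) -> None:
    """Hypothetical helper: re-download a dataset snapshot only if missing or stale."""
    if os.path.exists(local_dir):
        # Age of the local snapshot, taken from the directory mtime as in the removed guard.
        age = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(local_dir))
        if age.days <= max_age_days:
            return  # fresh enough; skip the network round-trip
    snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type="dataset", etag_timeout=30)
```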
 
@@ -202,6 +199,8 @@ with demo:
                             interactive=True,
                             elem_id="filter-columns-size")
 
+            # breakpoint()
+
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value + [AutoEvalColumn.dummy.name]
@@ -250,7 +249,7 @@ with demo:
 
         with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-            print(f'dataset df columns: {list(dataset_df.columns)}')
+
             dataset_table = gr.components.Dataframe(
                 value=dataset_df,
                 headers=list(dataset_df.columns),
@@ -258,8 +257,8 @@ with demo:
                 elem_id="dataset-table",
                 interactive=False,
                 visible=True,
-                column_widths=["15%", "20%"]
-            )
+                column_widths=["15%", "20%"])
+
             gr.Markdown(LLM_BENCHMARKS_DETAILS, elem_classes="markdown-text")
             gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
 
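For reference, column_widths on a Gradio Dataframe takes one CSS width per column, matching the ["15%", "20%"] pair kept above; a minimal runnable sketch with placeholder two-column data (the frame contents are illustrative, not the leaderboard's real summary table):

```python
import gradio as gr
import pandas as pd

# Illustrative stand-in for dataset_df.
df = pd.DataFrame({"Dataset": ["TriviaQA"], "Description": ["Open-domain question answering"]})

with gr.Blocks() as demo:
    gr.Dataframe(
        value=df,
        headers=list(df.columns),
        interactive=False,
        visible=True,
        column_widths=["15%", "20%"],  # one width per column, as in the diff
    )

demo.launch()
```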
 
 