Commit
04438ea
1 Parent(s): 514720c

update app.py

Files changed (1)
  1. app.py +9 -35
app.py CHANGED
@@ -59,7 +59,11 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
 
 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
+        # Instead of raising an error, display an empty leaderboard with a message
+        print("Leaderboard DataFrame is empty. No models have been evaluated yet.")
+        # Create an empty DataFrame with the necessary columns
+        dataframe = pd.DataFrame(columns=[c.name for c in fields(AutoEvalColumn)])
+        # Optionally, you can add a message to the interface to inform users
     return Leaderboard(
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn)],
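
The new branch hinges on constructing an empty DataFrame that still carries the expected column names, so the downstream Leaderboard component gets valid headers instead of a crash. A minimal sketch of just that fallback, using a hypothetical ColumnContent stand-in for the app's AutoEvalColumn fields:

    from dataclasses import dataclass

    import pandas as pd

    @dataclass
    class ColumnContent:
        # Hypothetical stand-in for one AutoEvalColumn field:
        # a display name plus a Gradio column dtype.
        name: str
        type: str

    # Illustrative columns only; the real app derives these via fields(AutoEvalColumn).
    AUTO_EVAL_COLUMNS = [
        ColumnContent("model", "markdown"),
        ColumnContent("average", "number"),
    ]

    def empty_leaderboard_frame() -> pd.DataFrame:
        # Same construction as the patch: zero rows, full set of headers.
        return pd.DataFrame(columns=[c.name for c in AUTO_EVAL_COLUMNS])

    df = empty_leaderboard_frame()
    print(df.empty, list(df.columns))  # True ['model', 'average']

Note that the patch itself only print()s to the server log; the trailing comment in the hunk suggests additionally surfacing the condition in the interface, which this sketch leaves out.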
@@ -106,41 +110,11 @@ with demo:
     with gr.Row():
         gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
+    # Since the evaluation queues are empty, we can remove or hide these sections
+    # Alternatively, display a message
     with gr.Column():
-        with gr.Accordion(
-            f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                finished_eval_table = gr.components.Dataframe(
-                    value=finished_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-        with gr.Accordion(
-            f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                running_eval_table = gr.components.Dataframe(
-                    value=running_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
-
-        with gr.Accordion(
-            f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-            open=False,
-        ):
-            with gr.Row():
-                pending_eval_table = gr.components.Dataframe(
-                    value=pending_eval_queue_df,
-                    headers=EVAL_COLS,
-                    datatype=EVAL_TYPES,
-                    row_count=5,
-                )
+        gr.Markdown("Evaluations are performed immediately upon submission. There are no pending or running evaluations.")
+
     with gr.Row():
         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
 
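With the three queue accordions removed, the queue section collapses to a single static notice. A rough, self-contained sketch of the resulting layout, assuming stock Gradio and a shortened placeholder for the app's EVALUATION_QUEUE_TEXT constant:

    import gradio as gr

    # Placeholder for the app's EVALUATION_QUEUE_TEXT constant.
    EVALUATION_QUEUE_TEXT = "Submit a model below to have it evaluated."

    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
        with gr.Column():
            # One static notice replaces the finished/running/pending tables.
            gr.Markdown(
                "Evaluations are performed immediately upon submission. "
                "There are no pending or running evaluations."
            )
        with gr.Row():
            gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

    if __name__ == "__main__":
        demo.launch()

Since the finished/running/pending DataFrames are no longer rendered, the code that loads them could presumably be dropped from app.py as well, though this commit (+9 −35, all accounted for above) leaves that untouched.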
 
 