Jerrycool committed on
Commit
be52959
·
verified ·
1 Parent(s): 1117820

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +214 -244
app.py CHANGED
@@ -1,275 +1,245 @@
1
- """
2
- app.py β€” MLE-Dojo Dark-Theme Leaderboard
3
- ---------------------------------------
4
- Run: python app.py
5
- """
6
-
7
  import gradio as gr
8
  import pandas as pd
9
  from apscheduler.schedulers.background import BackgroundScheduler
10
-
11
- # ---------- Placeholder / Fallback Imports ----------
12
- try:
13
- from src.about import (
14
- CITATION_BUTTON_LABEL,
15
- CITATION_BUTTON_TEXT,
16
- EVALUATION_QUEUE_TEXT,
17
- INTRODUCTION_TEXT,
18
- LLM_BENCHMARKS_TEXT,
19
- TITLE, # 将蒫覆盖
20
- )
21
- from src.display.css_html_js import custom_css
22
- from src.envs import REPO_ID
23
- from src.submission.submit import add_new_eval
24
- except ImportError:
25
- CITATION_BUTTON_LABEL = "Citation"
26
- CITATION_BUTTON_TEXT = "Please cite us if you use this benchmark..."
27
- EVALUATION_QUEUE_TEXT = "Current evaluation queue:"
28
- INTRODUCTION_TEXT = "Welcome to the MLE-Dojo Benchmark Leaderboard."
29
- LLM_BENCHMARKS_TEXT = "Information about the benchmarks..."
30
- custom_css = ""
31
- REPO_ID = "your/space-id"
32
-
33
- def add_new_eval(*args):
34
- return "Submission placeholder."
35
-
36
- # ---------- Elo Data ----------
37
  data = [
38
- dict(model_name="gpt-4o-mini", url="https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/",
39
- organizer="OpenAI", license="Proprietary", MLE_Lite_Elo=753, Tabular_Elo=839,
40
- NLP_Elo=758, CV_Elo=754, Overall=778),
41
- dict(model_name="gpt-4o", url="https://openai.com/index/hello-gpt-4o/",
42
- organizer="OpenAI", license="Proprietary", MLE_Lite_Elo=830, Tabular_Elo=861,
43
- NLP_Elo=903, CV_Elo=761, Overall=841),
44
- dict(model_name="o3-mini", url="https://openai.com/index/openai-o3-mini/",
45
- organizer="OpenAI", license="Proprietary", MLE_Lite_Elo=1108, Tabular_Elo=1019,
46
- NLP_Elo=1056, CV_Elo=1207, Overall=1096),
47
- dict(model_name="deepseek-v3", url="https://api-docs.deepseek.com/news/news1226",
48
- organizer="DeepSeek", license="DeepSeek", MLE_Lite_Elo=1004, Tabular_Elo=1015,
49
- NLP_Elo=1028, CV_Elo=1067, Overall=1023),
50
- dict(model_name="deepseek-r1", url="https://api-docs.deepseek.com/news/news250120",
51
- organizer="DeepSeek", license="DeepSeek", MLE_Lite_Elo=1137, Tabular_Elo=1053,
52
- NLP_Elo=1103, CV_Elo=1083, Overall=1100),
53
- dict(model_name="gemini-2.0-flash", url="https://ai.google.dev/gemini-api/docs/models#gemini-2.0-flash",
54
- organizer="Google", license="Proprietary", MLE_Lite_Elo=847, Tabular_Elo=923,
55
- NLP_Elo=860, CV_Elo=978, Overall=895),
56
- dict(model_name="gemini-2.0-pro", url="https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/",
57
- organizer="Google", license="Proprietary", MLE_Lite_Elo=1064, Tabular_Elo=1139,
58
- NLP_Elo=1028, CV_Elo=973, Overall=1054),
59
- dict(model_name="gemini-2.5-pro", url="https://deepmind.google/technologies/gemini/pro/",
60
- organizer="Google", license="Proprietary", MLE_Lite_Elo=1257, Tabular_Elo=1150,
61
- NLP_Elo=1266, CV_Elo=1177, Overall=1214),
62
  ]
 
 
63
  master_df = pd.DataFrame(data)
64
 
65
- # ---------- Category Logic ----------
66
- CATEGORIES = ["Overall", "MLE-Lite", "Tabular", "NLP", "CV"]
67
- DEFAULT_CATEGORY = "Overall"
 
 
68
  category_to_column = {
69
- "Overall": "Overall",
70
- "MLE-Lite": "MLE_Lite_Elo",
71
  "Tabular": "Tabular_Elo",
72
  "NLP": "NLP_Elo",
73
  "CV": "CV_Elo",
 
74
  }
75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
- def update_leaderboard(category: str) -> pd.DataFrame:
78
- col = category_to_column.get(category, category_to_column[DEFAULT_CATEGORY])
79
- df = master_df[["model_name", "url", "organizer", "license", col]].copy()
80
- df.sort_values(by=col, ascending=False, inplace=True)
81
- df.reset_index(drop=True, inplace=True)
82
- df.insert(0, "Rank", df.index + 1)
83
- df["Model"] = df.apply(
84
- lambda r: f"<a href='{r['url']}' target='_blank'>{r['model_name']}</a>", axis=1
85
- )
86
- df.rename(
87
- columns={col: "Elo Score", "organizer": "Organizer", "license": "License"},
88
- inplace=True,
89
- )
90
- return df[["Rank", "Model", "Organizer", "License", "Elo Score"]]
91
 
 
 
92
 
93
- # ---------- Dark-Theme CSS ----------
94
- dark_css = """
95
- /* ---- Google Font & Font Awesome ---- */
96
- @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
97
- body {
98
- font-family: 'Inter', sans-serif;
99
- background-color: #121212;
100
- color: #e0e0e0;
101
- font-size: 15px;
102
- }
103
 
104
- /* ---- Hero Section ---- */
105
- .hero-section {
106
- background: linear-gradient(135deg, #333, #222);
107
- color: #e0e0e0;
108
- padding: 1.75rem 1rem;
109
- border-radius: .75rem;
110
- margin-bottom: 1.5rem;
111
- text-align: center;
112
- box-shadow: 0 4px 12px rgba(0,0,0,0.55);
113
- }
114
- .hero-section h1 {
115
- margin: 0;
116
- font-size: 2.2rem;
117
- font-weight: 700;
118
- display: inline-flex;
119
- align-items: center;
120
- gap: .5rem;
121
- }
122
- .hero-section h1 i { /* ε₯–ζ―δΈŽζ–‡ε­—εŒθ‘Œ & 对齐 */
123
- margin: 0;
124
- font-size: 1em;
125
- }
126
- .hero-section h2 {
127
- margin: .6rem 0 0;
128
- font-size: 1.15rem;
129
- font-weight: 400;
130
- opacity: .8;
131
- }
132
 
133
- /* ---- Tabs ---- */
134
- .tab-buttons button {
135
- border-radius: 20px !important;
136
- padding: .55rem 1.15rem !important;
137
- margin-right: .6rem !important;
138
- background: #1e1e1e !important;
139
- color: #e0e0e0 !important;
140
- border: none !important;
141
- font-size: .95rem !important;
142
- font-weight: 500 !important;
143
- transition: background .28s;
144
- }
145
- .tab-buttons button:hover { background: #2c2c2c !important; }
146
- .tab-buttons button[aria-selected="true"] {
147
- background: #444 !important;
148
- color: #fff !important;
149
- }
150
 
151
- /* ---- Category Selector ---- */
152
- #category-selector label {
153
- display: inline-block;
154
- padding: .55rem 1.2rem;
155
- margin-right: .5rem;
156
- border-radius: 999px;
157
- background: #1d1d1d;
158
- cursor: pointer;
159
- transition: background .28s, color .28s;
160
- font-weight: 600;
161
- font-size: .95rem;
162
- color: #e0e0e0;
163
- }
164
- #category-selector input[type="radio"]:checked + label {
165
- background: #3d3d3d;
166
- color: #fff;
167
- }
168
-
169
- /* ---- Dataframe / Leaderboard ---- */
170
- .dataframe-container {
171
- max-height: 420px;
172
- overflow-y: auto;
173
- }
174
- .dataframe-container table {
175
- width: 100%;
176
- border-collapse: collapse;
177
- border: none;
178
- box-shadow: 0 2px 6px rgba(0,0,0,.55);
179
- border-radius: .55rem;
180
- }
181
- .dataframe-container thead th {
182
- background: #272727;
183
- color: #e0e0e0;
184
- font-weight: 600;
185
- padding: .85rem 1rem;
186
- font-size: .9rem;
187
- }
188
- .dataframe-container tbody tr:nth-child(odd) { background: #1c1c1c; }
189
- .dataframe-container tbody tr:nth-child(even) { background: #222; }
190
- .dataframe-container td, .dataframe-container th {
191
- padding: .8rem 1rem;
192
- font-size: .88rem;
193
- }
194
- .dataframe-container td a {
195
- color: #8ab4f8;
196
- text-decoration: none;
197
- }
198
- .dataframe-container td a:hover {
199
- color: #a3c9ff;
200
- text-decoration: underline;
201
- }
202
- """
203
 
204
- custom_css += dark_css
205
 
206
- # ---------- Override Title ----------
207
- TITLE = """
208
- <div class="hero-section">
209
- <h1><i class="fas fa-trophy"></i>MLE-Dojo Benchmark Leaderboard</h1>
210
- <h2>Improving LLM Agents for Machine Learning Engineering</h2>
211
- </div>
212
- """
213
 
214
- # ---------- Build Gradio App ----------
215
- demo = gr.Blocks(css=custom_css, theme=gr.themes.Base())
216
 
217
  with demo:
218
- # 注ε…₯ Font Awesome(保证ε₯–杯可用)
219
- gr.HTML(
220
- """
221
- <link rel="stylesheet"
222
- href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css"
223
- crossorigin="anonymous" referrerpolicy="no-referrer"/>
224
- """
225
- )
226
-
227
- # -------- Header & Intro --------
228
  gr.HTML(TITLE)
229
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
230
 
231
- # -------- Tabs --------
232
- with gr.Tabs(elem_classes="tab-buttons"):
233
- # --- Leaderboard Tab ---
234
- with gr.TabItem("πŸ“Š Leaderboard"):
235
- gr.Markdown("### Model Elo Rankings by Category")
236
- category_selector = gr.Radio(
237
- choices=CATEGORIES,
238
- value=DEFAULT_CATEGORY,
239
- interactive=True,
240
- elem_id="category-selector",
241
- label="Select Category:",
242
- )
243
- leaderboard_df = gr.Dataframe(
244
- value=update_leaderboard(DEFAULT_CATEGORY),
245
- headers=["Rank", "Model", "Organizer", "License", "Elo Score"],
246
- datatype=["number", "html", "str", "str", "number"],
247
- interactive=False,
248
- row_count=(len(master_df), "fixed"),
249
- col_count=(5, "fixed"),
250
- wrap=True,
251
- elem_id="leaderboard-table",
252
- )
253
- category_selector.change(
254
- fn=update_leaderboard,
255
- inputs=category_selector,
256
- outputs=leaderboard_df,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
257
  )
258
 
259
- # --- About Tab ---
260
- with gr.TabItem("ℹ️ About"):
261
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
 
 
 
 
 
 
262
 
263
- # -------- Citation --------
264
- with gr.Accordion("πŸ“™ Citation", open=False):
265
- gr.Textbox(
266
- value=CITATION_BUTTON_TEXT,
267
- label=CITATION_BUTTON_LABEL,
268
- lines=10,
269
- elem_id="citation-button",
270
- show_copy_button=True,
271
- )
272
 
273
- if __name__ == "__main__":
274
- print("Launching Gradio App in Dark Mode…")
275
- demo.launch()
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import pandas as pd
3
  from apscheduler.schedulers.background import BackgroundScheduler
4
+ # Removed Hugging Face Hub imports as they are not needed for the simplified leaderboard
5
+ # from huggingface_hub import snapshot_download, HfApi
6
+ from src.about import ( # Assuming these still exist and are relevant for other tabs
7
+ CITATION_BUTTON_LABEL,
8
+ CITATION_BUTTON_TEXT,
9
+ EVALUATION_QUEUE_TEXT,
10
+ INTRODUCTION_TEXT,
11
+ LLM_BENCHMARKS_TEXT,
12
+ TITLE,
13
+ )
14
+ from src.display.css_html_js import custom_css # Keep custom CSS
15
+ # Removed utils imports related to the old leaderboard
16
+ # from src.display.utils import (...)
17
+ from src.envs import REPO_ID # Keep if needed for restart_space or other functions
18
+ # Removed constants related to old data paths and repos if not needed elsewhere
19
+ # from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
20
+ # Removed old data processing functions
21
+ # from src.populate import get_evaluation_queue_df, get_leaderboard_df
22
+ from src.submission.submit import add_new_eval # Keep submission logic
23
+
24
+ # --- Elo Leaderboard Configuration ---
25
+ # Data from the table provided by the user
 
 
 
 
 
26
  data = [
27
+ {'model': 'gpt-4o-mini', 'MLE-Lite_Elo': 753, 'Tabular_Elo': 839, 'NLP_Elo': 758, 'CV_Elo': 754, 'Overall': 778},
28
+ {'model': 'gpt-4o', 'MLE-Lite_Elo': 830, 'Tabular_Elo': 861, 'NLP_Elo': 903, 'CV_Elo': 761, 'Overall': 841},
29
+ {'model': 'o3-mini', 'MLE-Lite_Elo': 1108, 'Tabular_Elo': 1019, 'NLP_Elo': 1056, 'CV_Elo': 1207, 'Overall': 1096},
30
+ # Renamed 'DeepSeek-v3' to match previous list - adjust if needed
31
+ {'model': 'deepseek-v3', 'MLE-Lite_Elo': 1004, 'Tabular_Elo': 1015, 'NLP_Elo': 1028, 'CV_Elo': 1067, 'Overall': 1023},
32
+ # Renamed 'DeepSeek-r1' to match previous list - adjust if needed
33
+ {'model': 'deepseek-r1', 'MLE-Lite_Elo': 1137, 'Tabular_Elo': 1053, 'NLP_Elo': 1103, 'CV_Elo': 1083, 'Overall': 1100},
34
+ # Renamed 'Gemini-2.0-Flash' to match previous list - adjust if needed
35
+ {'model': 'gemini-2.0-flash', 'MLE-Lite_Elo': 847, 'Tabular_Elo': 923, 'NLP_Elo': 860, 'CV_Elo': 978, 'Overall': 895},
36
+ # Renamed 'Gemini-2.0-Pro' to match previous list - adjust if needed
37
+ {'model': 'gemini-2.0-pro', 'MLE-Lite_Elo': 1064, 'Tabular_Elo': 1139, 'NLP_Elo': 1028, 'CV_Elo': 973, 'Overall': 1054},
38
+ # Renamed 'Gemini-2.5-Pro' to match previous list - adjust if needed
39
+ {'model': 'gemini-2.5-pro', 'MLE-Lite_Elo': 1257, 'Tabular_Elo': 1150, 'NLP_Elo': 1266, 'CV_Elo': 1177, 'Overall': 1214},
 
 
 
 
 
 
 
 
 
 
 
40
  ]
41
+
42
+ # Create a master DataFrame
43
  master_df = pd.DataFrame(data)
44
 
45
+ # Define categories for selection (user-facing)
46
+ CATEGORIES = ["MLE-Lite", "Tabular", "NLP", "CV", "Overall"]
47
+ DEFAULT_CATEGORY = "Overall" # Set a default category
48
+
49
+ # Map user-facing categories to DataFrame column names
50
  category_to_column = {
51
+ "MLE-Lite": "MLE-Lite_Elo",
 
52
  "Tabular": "Tabular_Elo",
53
  "NLP": "NLP_Elo",
54
  "CV": "CV_Elo",
55
+ "Overall": "Overall"
56
  }
57
 
58
+ # --- Helper function to update leaderboard ---
59
+ def update_leaderboard(category):
60
+ """
61
+ Selects the relevant columns for the category, renames the score column
62
+ to 'Elo Score', sorts by score descending, and returns the DataFrame.
63
+ """
64
+ score_column = category_to_column.get(category)
65
+ if score_column is None or score_column not in master_df.columns:
66
+ # Fallback if category or column is invalid
67
+ print(f"Warning: Invalid category '{category}' or column '{score_column}'. Falling back to default.")
68
+ score_column = category_to_column[DEFAULT_CATEGORY]
69
+ if score_column not in master_df.columns: # Check fallback column too
70
+ return pd.DataFrame({"Model": [], "Elo Score": []}) # Return empty if still invalid
71
 
72
+ # Select model and the specific score column
73
+ df = master_df[['model', score_column]].copy()
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
+ # Rename the score column to 'Elo Score' for consistent display
76
+ df.rename(columns={score_column: 'Elo Score'}, inplace=True)
77
 
78
+ # Sort by 'Elo Score' descending
79
+ df.sort_values(by='Elo Score', ascending=False, inplace=True)
 
 
 
 
 
 
 
 
80
 
81
+ # Reset index for cleaner display (optional)
82
+ df.reset_index(drop=True, inplace=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
+ return df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
+ # --- Mock/Placeholder functions/data for other tabs ---
87
+ # (Same as previous version - providing empty data)
88
+ print("Warning: Evaluation queue data fetching is disabled/mocked due to leaderboard changes.")
89
+ finished_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
90
+ running_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
91
+ pending_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
92
+ EVAL_COLS = ["Model", "Status", "Requested", "Started"] # Define for the dataframe headers
93
+ EVAL_TYPES = ["str", "str", "str", "str"] # Define for the dataframe types
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
 
95
 
96
+ # --- Keep restart function if relevant ---
97
+ # (Same as previous version)
98
+ def restart_space():
99
+ print(f"Attempting to restart space: {REPO_ID}")
100
+ # Replace with your actual space restart mechanism if needed
 
 
101
 
102
+ # --- Gradio App Definition ---
103
+ demo = gr.Blocks(css=custom_css)
104
 
105
  with demo:
 
 
 
 
 
 
 
 
 
 
106
  gr.HTML(TITLE)
107
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
108
 
109
+ with gr.Tabs(elem_classes="tab-buttons") as tabs:
110
+ with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
111
+ with gr.Column():
112
+ gr.Markdown("## Model Elo Rankings") # New title for the section
113
+ category_selector = gr.Radio(
114
+ choices=CATEGORIES,
115
+ label="Select Category to Sort By", # Updated label
116
+ value=DEFAULT_CATEGORY, # Default selection
117
+ interactive=True,
118
+ container=False,
119
+ )
120
+ leaderboard_df_component = gr.Dataframe(
121
+ # Initialize with sorted data for the default category
122
+ value=update_leaderboard(DEFAULT_CATEGORY),
123
+ headers=["Model", "Elo Score"],
124
+ datatype=["str", "number"],
125
+ interactive=False,
126
+ # Adjust row count based on the number of models
127
+ row_count=(len(master_df), "fixed"),
128
+ col_count=(2, "fixed"),
129
+ )
130
+ # Link the radio button change to the update function
131
+ category_selector.change(
132
+ fn=update_leaderboard,
133
+ inputs=category_selector,
134
+ outputs=leaderboard_df_component
135
+ )
136
+
137
+ with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
138
+ # (Content unchanged)
139
+ gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
140
+
141
+ with gr.TabItem("πŸš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
142
+ # (Content unchanged, still uses potentially empty/mock queue data)
143
+ with gr.Column():
144
+ with gr.Row():
145
+ gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
146
+ with gr.Column():
147
+ with gr.Accordion(
148
+ f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
149
+ open=False,
150
+ ):
151
+ with gr.Row():
152
+ finished_eval_table = gr.components.Dataframe(
153
+ value=finished_eval_queue_df,
154
+ headers=EVAL_COLS,
155
+ datatype=EVAL_TYPES,
156
+ row_count=5,
157
+ )
158
+ with gr.Accordion(
159
+ f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
160
+ open=False,
161
+ ):
162
+ with gr.Row():
163
+ running_eval_table = gr.components.Dataframe(
164
+ value=running_eval_queue_df,
165
+ headers=EVAL_COLS,
166
+ datatype=EVAL_TYPES,
167
+ row_count=5,
168
+ )
169
+ with gr.Accordion(
170
+ f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
171
+ open=False,
172
+ ):
173
+ with gr.Row():
174
+ pending_eval_table = gr.components.Dataframe(
175
+ value=pending_eval_queue_df,
176
+ headers=EVAL_COLS,
177
+ datatype=EVAL_TYPES,
178
+ row_count=5,
179
+ )
180
+
181
+ with gr.Row():
182
+ gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
183
+ with gr.Row():
184
+ # Submission form - kept as is
185
+ with gr.Column():
186
+ model_name_textbox = gr.Textbox(label="Model name")
187
+ revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
188
+ model_type = gr.Dropdown(
189
+ choices=["Type A", "Type B", "Type C"], # Example choices
190
+ label="Model type",
191
+ multiselect=False,
192
+ value=None,
193
+ interactive=True,
194
+ )
195
+ with gr.Column():
196
+ precision = gr.Dropdown(
197
+ choices=["float16", "bfloat16", "float32", "int8"], # Example choices
198
+ label="Precision",
199
+ multiselect=False,
200
+ value="float16",
201
+ interactive=True,
202
+ )
203
+ weight_type = gr.Dropdown(
204
+ choices=["Original", "Adapter", "Delta"], # Example choices
205
+ label="Weights type",
206
+ multiselect=False,
207
+ value="Original",
208
+ interactive=True,
209
+ )
210
+ base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
211
+
212
+ submit_button = gr.Button("Submit Eval")
213
+ submission_result = gr.Markdown()
214
+
215
+ submit_button.click(
216
+ add_new_eval,
217
+ [
218
+ model_name_textbox,
219
+ base_model_name_textbox,
220
+ revision_name_textbox,
221
+ precision,
222
+ weight_type,
223
+ model_type,
224
+ ],
225
+ submission_result,
226
  )
227
 
228
+ with gr.Row():
229
+ with gr.Accordion("πŸ“™ Citation", open=False):
230
+ # (Content unchanged)
231
+ citation_button = gr.Textbox(
232
+ value=CITATION_BUTTON_TEXT,
233
+ label=CITATION_BUTTON_LABEL,
234
+ lines=20,
235
+ elem_id="citation-button",
236
+ show_copy_button=True,
237
+ )
238
 
239
+ # --- Keep scheduler if relevant ---
240
+ # scheduler = BackgroundScheduler()
241
+ # scheduler.add_job(restart_space, "interval", seconds=1800) # Restart every 30 mins
242
+ # scheduler.start()
 
 
 
 
 
243
 
244
+ # --- Launch the app ---
245
+ demo.launch()