ahmedheakl committed on
Commit
713ff7f
·
verified ·
1 Parent(s): d0e6f8a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import gradio as gr
3
+
4
# Leaderboard results: one row per routing method, one column per benchmark task.
# BUG FIX: the original dict literal used the key "TWCVRP" twice; in a Python
# dict literal the later duplicate silently wins, so the first score list
# (57.90, 48.82, ...) was discarded and "Average Score" collapsed to a single
# column. The page text says the benchmark covers CVRP and TWVRP, so the two
# score columns are named accordingly.
# NOTE(review): confirm against the benchmark/paper which score list belongs
# to which task — the mapping below assumes source order matches "CVRP and
# TWVRP" as written in the page description.
data = {
    "Method": [
        "OR-Tools", "ACO", "LKH3", "NN2Opt", "Tabu",
    ],
    "CVRP": [
        57.90, 48.82, 51.35, 46.67, 45.58,
    ],
    "TWVRP": [
        59.11, 42.89, 49.06, 36.59, 33.59,
    ],
}

df = pd.DataFrame(data)
# Per-method mean over every score column (all columns after "Method"),
# rounded to two decimals for display.
df['Average Score'] = df.iloc[:, 1:].mean(axis=1).round(2)
# Reorder so "Method" and "Average Score" lead, followed by the task columns
# in their original order.
df = df[['Method', 'Average Score'] + [col for col in df.columns if col not in ['Method', 'Average Score']]]
19
+
20
def display_data():
    """Return the module-level leaderboard DataFrame.

    Kept as a zero-argument accessor so it can be wired directly as a
    Gradio callback; it does not copy or mutate ``df``.
    """
    return df
22
+
23
# Gradio UI: a two-tab leaderboard page built from the module-level `df`.
# Component instantiation order inside the `with` blocks determines the
# rendered layout, so the statement order here must be preserved.
with gr.Blocks() as demo:
    # Commented-out banner image kept from the original for future use.
    # gr.Markdown("![camel icon](https://cdn-uploads.huggingface.co/production/uploads/656864e12d73834278a8dea7/n-XfVKd1xVywH_vgPyJyQ.png)", elem_id="camel-icon") # Replace with actual camel icon URL
    gr.Markdown("# **SVRP-Bench: Real-World Vehicle Routing Benchmark**")
    gr.Markdown("""
    This table shows the performance of different models across various tasks/datasets including CVRP and TWVRP.
    """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Tab 1: the leaderboard table itself (read-only).
        with gr.TabItem("🏅 LLM Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            # with gr.Row():
            #     with gr.Column():
            gr.Dataframe(value=df, label="SVRP-Bench Model Performance", interactive=False)

        # Tab 2: static submission instructions.
        with gr.TabItem("📤 How to Submit", elem_id="submission-tab", id=1):
            gr.Markdown("""
            ## Submission Instructions
            To contribute your model's results to the SVRP-Bench leaderboard:
            - **Via Email**:
            - Send your results to **ahmed.heakl@mbzuai.ac.ae**, and we’ll add them to the leaderboard for you.
            **We look forward to seeing your contributions!**
            """)

# Launch the app (blocking call; serves the UI when the script is run).
# NOTE(review): the diff rendering flattened the original indentation — this
# reconstruction assumes conventional layout; the markdown string contents
# are reproduced as shown in the diff.
demo.launch()