caliangandrew committed on
Commit
b40d6a8
·
verified ·
1 Parent(s): d9c4f46

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -26
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import gradio as gr
2
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
  import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
 
@@ -29,27 +28,19 @@ data_dataset_accuracy = {
29
  df_avg_performance = pd.DataFrame(data_avg_performance)
30
  df_dataset_accuracy = pd.DataFrame(data_dataset_accuracy)
31
 
32
def init_leaderboard():
    """Build the Leaderboard widget for the average-performance table.

    Uses the module-level ``df_avg_performance`` DataFrame as the data
    source and exposes column selection, search, and a detector filter.

    Returns:
        A non-interactive ``gradio_leaderboard.Leaderboard`` instance.

    Raises:
        ValueError: if the backing DataFrame holds no rows.
    """
    if df_avg_performance.empty:
        raise ValueError("Leaderboard DataFrame is empty.")

    # One string column (detector name) followed by five numeric metrics.
    displayed_columns = ["Detector", "Accuracy", "Precision", "Recall", "F1-Score", "MCC"]
    column_types = ['str'] + ['number'] * 5

    return Leaderboard(
        value=df_avg_performance,
        datatype=column_types,
        select_columns=SelectColumns(
            default_selection=displayed_columns,
            label="Select Columns to Display:"
        ),
        search_columns=["Detector"],
        filter_columns=[
            ColumnFilter("Detector", type="checkboxgroup", label="Detectors"),
        ],
        bool_checkboxgroup_label="Hide detectors",
        interactive=False,
    )
52
 
 
 
 
 
 
 
 
 
53
  demo = gr.Blocks()
54
 
55
  with demo:
@@ -58,12 +49,17 @@ with demo:
58
 
59
  with gr.Tabs():
60
  with gr.TabItem("🏅 Deepfake Detector Arena", elem_id="dfd-leaderboard-tab"):
61
- # Only show the leaderboard here
62
- leaderboard = init_leaderboard()
 
 
 
 
63
 
64
- # Add a separate dataframe for dataset-specific accuracy
65
  gr.Markdown("## Dataset-specific Accuracy")
66
- gr.DataFrame(value=df_dataset_accuracy)
 
67
 
68
  with gr.TabItem("📝 About"):
69
  gr.Markdown("This leaderboard evaluates deepfake detection algorithms on various metrics and datasets.")
@@ -72,4 +68,4 @@ with demo:
72
  gr.Markdown("Submit your detector results for evaluation.")
73
  # Add submission form elements as needed here
74
 
75
- demo.queue(default_concurrency_limit=40).launch()
 
1
  import gradio as gr
 
2
  import pandas as pd
3
  from apscheduler.schedulers.background import BackgroundScheduler
4
 
 
28
  df_avg_performance = pd.DataFrame(data_avg_performance)
29
  df_dataset_accuracy = pd.DataFrame(data_dataset_accuracy)
30
 
31
# Styling helper: bold red text on the maximum value(s) of a column
def highlight_max(s):
    """Return a list of CSS style strings for the values of *s*.

    Intended for ``pandas`` ``Styler.apply``: every cell equal to the
    column maximum gets bold red styling; all other cells (including
    NaN, which never compares equal) get an empty style string.
    """
    max_value = s.max()
    return [
        'font-weight: bold; color: red;' if value == max_value else ''
        for value in s
    ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
# Build a Styler for the average-performance leaderboard table
def style_dataframe(df):
    """Return a pandas Styler for *df* with each metric column's maximum highlighted.

    Only the five metric columns are styled; other columns are untouched.
    """
    metric_columns = ["Accuracy", "Precision", "Recall", "F1-Score", "MCC"]
    return df.style.apply(highlight_max, subset=metric_columns)
39
+
40
def style_dataset_accuracy(df):
    """Return a pandas Styler for *df* with each accuracy column's maximum highlighted.

    The first column is excluded from styling (presumably the detector
    name column — TODO confirm against the dataset schema); every
    remaining column gets max-value highlighting.
    """
    value_columns = df.columns[1:]
    return df.style.apply(highlight_max, subset=value_columns)
42
+
43
+ # Gradio demo with the styled dataframes
44
  demo = gr.Blocks()
45
 
46
  with demo:
 
49
 
50
  with gr.Tabs():
51
  with gr.TabItem("🏅 Deepfake Detector Arena", elem_id="dfd-leaderboard-tab"):
52
+ # Add text for Average Performance Metrics
53
+ gr.Markdown("## Average Performance Metrics")
54
+
55
+ # Display the average performance metrics with highlighted max values
56
+ styled_avg_performance = style_dataframe(df_avg_performance)
57
+ gr.DataFrame(styled_avg_performance.render(), label="Average Performance Metrics", interactive=False)
58
 
59
+ # Add a separate dataframe for dataset-specific accuracy with highlighted max values
60
  gr.Markdown("## Dataset-specific Accuracy")
61
+ styled_dataset_accuracy = style_dataset_accuracy(df_dataset_accuracy)
62
+ gr.DataFrame(styled_dataset_accuracy.render(), label="Dataset-specific Accuracy", interactive=False)
63
 
64
  with gr.TabItem("📝 About"):
65
  gr.Markdown("This leaderboard evaluates deepfake detection algorithms on various metrics and datasets.")
 
68
  gr.Markdown("Submit your detector results for evaluation.")
69
  # Add submission form elements as needed here
70
 
71
+ demo.queue(default_concurrency_limit=40).launch()