caliangandrew committed
Commit a218cef · verified · 1 parent: a15ffed

Update app.py

Files changed (1)
  1. app.py +70 -22
app.py CHANGED
@@ -1,17 +1,33 @@
 import gradio as gr
+from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
+from apscheduler.schedulers.background import BackgroundScheduler
+from huggingface_hub import snapshot_download
 
-# Data for Table 1 - Average Performance Metrics
+# Placeholder imports - replace with your own or modify as needed
+from src.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    TITLE,
+)
+from src.display.css_html_js import custom_css
+from src.display.utils import (
+    AutoEvalColumn,
+    fields
+)
+
+# Deepfake detector data
 data_avg_performance = {
     "Detector": ["NPR", "UCF", "CAMO"],
     "Accuracy": [0.7169, 0.7229, 0.7555],
     "Precision": [0.9193, 0.9436, 0.9442],
     "Recall": [0.5996, 0.592, 0.647],
-    "F-1": [0.7258, 0.7275, 0.7679],
+    "F1-Score": [0.7258, 0.7275, 0.7679],
     "MCC": [0.5044, 0.5285, 0.5707],
 }
 
-# Data for Table 2 - Dataset-specific Accuracy
 data_dataset_accuracy = {
     "Detector": ["NPR", "UCF", "CAMO"],
     "CelebA-HQ": [0.987, 0.995, 0.999],
@@ -24,26 +40,58 @@ data_dataset_accuracy = {
     "MS-COCO-Flux": [0.588, 0.576, 0.59],
 }
 
-# Convert to DataFrames
+# Convert data to DataFrames
 df_avg_performance = pd.DataFrame(data_avg_performance)
 df_dataset_accuracy = pd.DataFrame(data_dataset_accuracy)
 
-# Create Gradio interface to display the tables
-def leaderboard_view():
-    return df_avg_performance, df_dataset_accuracy
-
-# Gradio Interface
-demo = gr.Interface(
-    fn=leaderboard_view,
-    inputs=[],
-    outputs=[
-        gr.DataFrame(label="Average Performance Metrics"),
-        gr.DataFrame(label="Dataset-specific Accuracy"),
-    ],
-    title="Deepfake Detector Arena Leaderboard",
-    description="This leaderboard shows detectors and their average performance metrics as well as dataset-specific accuracy for various datasets.",
-)
+def init_leaderboard():
+    if df_avg_performance.empty or df_dataset_accuracy.empty:
+        raise ValueError("Leaderboard DataFrames are empty.")
+
+    # Combine the two dataframes into a single leaderboard
+    leaderboard = Leaderboard(
+        value=df_avg_performance,
+        datatype=['str', 'number', 'number', 'number', 'number', 'number'],
+        select_columns=SelectColumns(
+            default_selection=["Detector", "Accuracy", "Precision", "Recall", "F1-Score", "MCC"],
+            label="Select Columns to Display:"
+        ),
+        search_columns=["Detector"],
+        filter_columns=[
+            ColumnFilter("Detector", type="checkboxgroup", label="Detectors"),
+        ],
+        bool_checkboxgroup_label="Hide detectors",
+        interactive=False,
+    )
+    return leaderboard
+
+demo = gr.Blocks(css=custom_css)
+with demo:
+    gr.HTML(TITLE)
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("🏅 Deepfake Detector Arena", elem_id="dfd-leaderboard-tab", id=0):
+            leaderboard = init_leaderboard()
+            gr.Markdown("## Detector Average Performance", elem_classes="markdown-text")
+            gr.DataFrame(value=df_avg_performance)
+
+            gr.Markdown("## Dataset-specific Accuracy", elem_classes="markdown-text")
+            gr.DataFrame(value=df_dataset_accuracy)
+
+        with gr.TabItem("📝 About", elem_id="about-tab", id=2):
+            gr.Markdown("This leaderboard evaluates deepfake detection algorithms on various metrics and datasets.")
+
+        with gr.TabItem("🚀 Submit Detector Results", elem_id="submit-tab", id=3):
+            gr.Markdown("Submit your detector results for evaluation.")
+            # Add submission form elements as needed here (e.g., textboxes, file upload)
+
+    with gr.Row():
+        with gr.Accordion("📙 Citation", open=False):
+            gr.Textbox(value=CITATION_BUTTON_TEXT, label=CITATION_BUTTON_LABEL, lines=20, show_copy_button=True)
+
+scheduler = BackgroundScheduler()
+scheduler.add_job(lambda: print("Scheduled Task"), "interval", seconds=1800)
+scheduler.start()
 
-# Launch Gradio app
-demo.launch(share=True)
-#demo.launch(server_name="0.0.0.0", server_port=7860)
+demo.queue(default_concurrency_limit=40).launch()
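
The new src.about and src.display imports assume a src/ package that this commit does not add. For anyone trying to run the updated app.py locally, minimal stand-in modules along these lines should satisfy the imports; every value below is an illustrative assumption, not the Space's real content.

# src/about.py - placeholder values only
TITLE = """<h1 align="center">Deepfake Detector Arena Leaderboard</h1>"""
INTRODUCTION_TEXT = "Average performance metrics and per-dataset accuracy for deepfake detectors."
EVALUATION_QUEUE_TEXT = "Instructions for submitting detector results."
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = "(BibTeX entry goes here)"

# src/display/css_html_js.py - an empty stylesheet is enough to satisfy the import
custom_css = ""

# src/display/utils.py - imported but not yet used by app.py;
# trivial placeholders keep the import from failing
class AutoEvalColumn:
    pass

def fields(raw_class):
    # Return the non-dunder attributes of a column-definition class
    return [v for k, v in vars(raw_class).items() if not k.startswith("__")]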
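The commit also implies new dependencies for the Space. Inferring package names from the imports alone (version pins omitted), requirements.txt would need at least:

gradio
gradio_leaderboard
pandas
APScheduler
huggingface_hub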
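Two loose ends worth flagging: snapshot_download is imported but never called, and the scheduled job is still a placeholder print. If the intent follows the usual leaderboard pattern of periodically refreshing evaluation results from the Hub, the job might eventually look something like the sketch below; the repo_id and local_dir are purely hypothetical.

def refresh_results():
    # Hypothetical dataset repo; replace with wherever the arena results live.
    snapshot_download(repo_id="deepfake-arena/results", repo_type="dataset", local_dir="eval-results")

scheduler.add_job(refresh_results, "interval", seconds=1800)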