import gradio as gr
import pandas as pd
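
# Path to the CSV file that backs the leaderboard display.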
LEADERBOARD_PATH = "leaderboard.csv"


def load_leaderboard():
    """Load the leaderboard CSV, or return a placeholder frame if it is missing."""
    try:
        return pd.read_csv(LEADERBOARD_PATH)
    except FileNotFoundError:
        return pd.DataFrame([{"Status": "No leaderboard data available"}])


def style_leaderboard(df: pd.DataFrame):
    """Return styled HTML with highlighted best performers and professional formatting."""
    if df.empty:
        return "<p>No data available.</p>"

    # All columns except the identifiers are treated as numeric metric columns.
    num_cols = [c for c in df.columns if c not in ["Rank", "Model"]]

    def highlight_best(s):
        # Emphasize the best (maximum) value in each numeric column.
        if pd.api.types.is_numeric_dtype(s):
            max_val = s.max()
            return ['color: #6AA84F; font-weight: 600;' if v == max_val else '' for v in s]
        return ['' for _ in s]

    df = df.reset_index(drop=True)
    styled = (
        df.style
        .apply(highlight_best, subset=num_cols, axis=0)
        .format(precision=1)
        .hide(axis='index')
    )
    # Wrap the table in a scrollable container with professional styling.
    html = styled.to_html()
    return f"""
    <div style="margin: 20px 0;">
      <div style="overflow-x: auto; border: 1px solid #e2e8f0; border-radius: 8px; box-shadow: 0 1px 3px rgba(0,0,0,0.1);">
        <style>
          table {{
            width: 100%;
            border-collapse: collapse;
            font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
          }}
          th {{
            font-weight: 600;
            padding: 16px 12px;
            text-align: left;
            border-bottom: 2px solid #e2e8f0;
            font-size: 14px;
          }}
          td {{
            padding: 12px;
            border-bottom: 1px solid #f1f5f9;
            font-size: 14px;
          }}
          tr:hover {{
            background-color: #f8fafc; /* subtle hover, consistent with the slate palette */
          }}
        </style>
        {html}
      </div>
    </div>
    """


def leaderboard_view():
    """Load the leaderboard data and render it as styled HTML."""
    df = load_leaderboard()
    return style_leaderboard(df)


# ---------------- Gradio UI ---------------- #
with gr.Blocks(css="""
    .gradio-container {
        max-width: 1200px !important;
        margin: auto;
        font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
    }
    #title-image {
        margin: 20px auto;
        display: block;
        max-width: 800px;
    }
    .gr-markdown h1 {
        color: #1e293b;
        font-weight: 700;
        margin-bottom: 16px;
    }
    .gr-markdown h2 {
        color: #334155;
        font-weight: 600;
        margin-top: 24px;
        margin-bottom: 12px;
    }
    .gr-markdown h3 {
        color: #475569;
        font-weight: 600;
        margin-bottom: 8px;
    }
    .gr-markdown p {
        color: #64748b;
        line-height: 1.6;
    }
    .gr-tab-nav button {
        font-weight: 500;
    }
""") as demo:
    # Banner image
    gr.Image("title.png", elem_id="title-image", show_label=False)

    # Professional introduction
    gr.Markdown("""
    # DFBench: The Image Deepfake Detection Benchmark 2025

    DFBench provides a standardized evaluation framework for computer vision deepfake detection systems.
    This leaderboard focuses on image deepfake detection, e.g., the outputs of text-to-image and image-to-image models.

    **Objectives:**
    - Allow fair comparison between deepfake detection models on unseen test data
    - Advance the state of the art in synthetic media identification

    This benchmark serves the academic and industry research community by providing consistent evaluation standards for deepfake detection methodologies.
    """)
with gr.Tab("Leaderboard"):
gr.Markdown("## Current Performance Rankings")
gr.HTML(leaderboard_view())
gr.Markdown("""
*Leaderboard is updated upon validation of new submissions. All results are evaluated using standardized metrics on the official test dataset.*
""")
with gr.Tab("Submission Guidelines"):
gr.Markdown("""
# Model Submission Process
**Official Dataset:** [DFBench / Image-Deepfake-Detection-25](https://huggingface.co/datasets/DFBench/Image-Deepfake-Detection-25)
The evaluation dataset comprises **2,920 images** with binary classification labels:
- **Real:** Authentic, unmodified images
- **Fake:** AI-generated or synthetically modified content
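
        If you want to pull the images locally, one option is `huggingface_hub` (a minimal sketch, assuming the default repo layout; see the dataset card for details):

        ```python
        from huggingface_hub import snapshot_download

        # Downloads the dataset repo into the local HF cache and returns its path.
        local_dir = snapshot_download(
            "DFBench/Image-Deepfake-Detection-25",
            repo_type="dataset",  # target the dataset repo, not a model repo
        )
        print(local_dir)
        ```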

        ---

        ## Submission Requirements

        ### File Format
        Submit predictions as a CSV file with the following structure: `filename,label`.
        - `filename`: Exact filename as provided in the dataset
        - `label`: Binary classification result (`real` or `fake`)

        For example:
        ```
        filename,label
        1.jpg,fake
        2.jpeg,real
        3.webp,fake
        ...
        2920.png,fake
        ```
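
        Before submitting, it may be worth sanity-checking the file locally; a minimal sketch with pandas (the file name is illustrative):

        ```python
        import pandas as pd

        preds = pd.read_csv("Model_This_name.csv")
        assert list(preds.columns) == ["filename", "label"], "header must be exactly: filename,label"
        assert len(preds) == 2920, f"expected 2920 rows, found {len(preds)}"
        assert preds["label"].isin(["real", "fake"]).all(), "labels must be 'real' or 'fake'"
        assert preds["filename"].is_unique, "duplicate filenames detected"
        ```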

        ### Submission Process
        1. **Prediction Generation**: Generate predictions for all 2,920 test images
        2. **File Preparation**: Format the results according to the specification above
        3. **Submission**: Email the CSV file to **[email protected]**. Name the file after your leaderboard model name; e.g., `Model_This_name.csv` appears on the leaderboard as `Model This name`.

        ### Evaluation Timeline
        - Submissions are processed within 5-7 business days
        - Results are validated against ground-truth labels
        - Approved submissions are added to the public leaderboard

        ---

        ## Technical Notes
        - Model names will be formatted for display (underscores converted to spaces; see the example below)
        - Each research group may submit one set of scores per month
        - All submissions undergo automated validation before leaderboard inclusion
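
        The display conversion is essentially a plain string replacement:

        ```python
        # "Model_This_name.csv" -> leaderboard entry "Model This name"
        display_name = "Model_This_name".replace("_", " ")
        ```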

        ## Legal Notes
        - The authors reserve the right not to publish, or to remove, a submission at their discretion
        - Submissions may be excluded if found to violate ethical guidelines, contain malicious content, or appear fraudulent
        - Benchmark maintainers may adjust evaluation protocols as the dataset and task evolve
        - No warranties are provided regarding benchmark results, which are intended strictly for research and comparison purposes

        For technical inquiries regarding the evaluation process, please contact the benchmark maintainers through the submission email.
        """)


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)