# c2e60bb: fix and make functional, add more datasets
# This is the main module that handles rendering the Gradio interface.
# NOTE: gradio will automatically create REST API endpoints for the functions that are used as event handlers in the interface.
import gradio as gr
import pandas as pd
from tasks import start_eval_task, get_status
from hf import get_or_create_leaderboard
def get_latest_leaderboard_html(datasets: list[str], sort_option: str) -> str:
    """Render the current leaderboard as a styled HTML table.

    Args:
        datasets: Dataset names selected in the UI; each adds a FER column.
            "Average" maps to the ``average_fer`` column, any other name to
            ``fer_<name>``.
        sort_option: "PER" or "FER" (case-insensitive); rows are ordered so the
            smallest value of the chosen metric comes first.

    Returns:
        An HTML ``<table>`` string, or a human-readable message when there is
        no data yet or rendering fails.
    """
    try:
        # Keep only the newest submission per repo: newest-first sort, then
        # drop later (older) duplicates.
        scores: pd.DataFrame = (
            get_or_create_leaderboard()
            .sort("submission_timestamp", reverse=True)
            .to_pandas()  # type: ignore
        )
        scores = scores.drop_duplicates("repo_id", keep="first")
        if scores.empty:
            return "No scores, please submit models for evaluation."

        # Lower error rates are better, so sort ascending on the chosen metric.
        metric = "average_per" if sort_option.lower() == "per" else "average_fer"
        scores = scores.sort_values(by=metric, ascending=True)

        def as_percent(series: pd.Series) -> pd.Series:
            return series.apply(lambda v: f"{100 * v:.2f}%")

        def model_anchor(row: pd.Series) -> str:
            return f'<a href="https://huggingface.co/{row["repo_id"]}" target="_blank">{row["display_name"]}</a>'

        # Assemble the display columns in presentation order.
        columns = {
            "Model": scores.apply(model_anchor, axis=1),
            "Average PER ⬇️": as_percent(scores["average_per"]),
        }
        for name in datasets:
            source_col = "average_fer" if name == "Average" else f"fer_{name}"
            columns[f"{name} FER ⬇️"] = as_percent(scores[source_col])
        columns["Link"] = scores["url"].apply(
            lambda u: f'<a href="{u}" target="_blank">Repository</a>' if u else "N/A"
        )
        columns["Submission Date"] = pd.to_datetime(
            scores["submission_timestamp"]
        ).dt.strftime("%Y-%m-%d")

        return pd.DataFrame(columns).to_html(
            escape=False, index=False, classes="styled-table"
        )
    except Exception as e:
        # Surface any failure to the UI instead of crashing the interface.
        return f"Error updating leaderboard: {type(e).__name__} - {e}"
def submit_evaluation(model_id: str, display_name: str, url: str) -> str:
    """Queue a model for evaluation and report the outcome as a status string.

    Args:
        model_id: Hugging Face model ID (e.g. "facebook/wav2vec2-lv-60-espeak-cv-ft").
        display_name: Human-readable submission name shown on the leaderboard.
        url: Optional repository link for the submission.

    Returns:
        A user-facing status message (success with task ID, validation warning,
        or error description). Never raises.
    """
    model_id = model_id.strip()
    display_name = display_name.strip()
    # Strip the optional URL too, so stray whitespace doesn't end up in links.
    url = url.strip()
    if not model_id or not display_name:
        return "⚠️ Please provide both model name and submission name."
    try:
        task_id = start_eval_task(display_name, model_id, url)
        # Fixed mojibake: status emojis were UTF-8 bytes decoded as cp1252.
        return f"✅ Evaluation submitted successfully! Task ID: {task_id}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
# Build the Gradio UI. `demo` is module-level so hosting platforms (e.g.
# Hugging Face Spaces) can import and serve it, in addition to direct launch.
with gr.Blocks(
    css="""
    .styled-table {
        width: 100%;
        border-collapse: collapse;
        margin: 25px 0;
        font-size: 0.9em;
        font-family: sans-serif;
    }
    .styled-table thead tr {
        background: linear-gradient(45deg, #092746, #073562, #0A648F);
    }
    .styled-table th {
        color: white;
    }
    .styled-table th,
    .styled-table td {
        padding: 12px 15px;
    }
    .styled-table tbody tr {
        border-bottom: 1px solid #dddddd;
    }
    """
) as demo:
    gr.Markdown("# 🎯 English Speech2IPA Leaderboard")
    gr.Markdown("#### Developed By: [Koel Labs](https://koellabs.com)")
    gr.Markdown(
        """
        ## Evaluation
        We use two standard metrics:
        - **PER (Phoneme Error Rate)**: The Levenshtein distance calculated between phoneme sequences of the predicted and actual transcriptions.
        - **FER (Feature Error Rate)**: The edit distance between the predicted and actual phoneme sequences, weighted by the phonetic features from [panphon](https://github.com/dmort27/panphon).
        Models are evaluated on a variety of English speech: native, non-native, and impaired. Read more about evaluations on [our blog](https://www.koellabs.com/blog/phonemic-transcription-metrics)
        ## Compute
        This leaderboard uses the free basic plan (16GB RAM, 2vCPUs) to allow for reproducibility. The evaluation may take several hours to complete. Please be patient and do not submit the same model multiple times.
        ## Contributing, Questions, and Feedback
        Please read the [README.md](https://huggingface.co/spaces/KoelLabs/IPA-Transcription-EN/blob/main/README.md) for more information on how to contribute, ask questions, or provide feedback.
        """
    )
    with gr.Tabs():
        # --- Leaderboard tab (mojibake emoji in labels fixed) ---
        with gr.TabItem("🏆 Leaderboard"):
            dataset_dropdown = gr.Dropdown(
                choices=["Average", "TIMIT", "EpaDB", "PSST", "SpeechOcean", "ISLE"],
                value=["Average"],
                multiselect=True,
                interactive=True,
                scale=2,
                container=False,  # Removes the box around the dropdown
            )
            with gr.Row(elem_classes="controls-row"):
                sort_dropdown = gr.Dropdown(
                    choices=["FER", "PER"],
                    value="FER",
                    interactive=True,
                    scale=2,
                    container=False,  # Removes the box around the dropdown
                )
                refresh_btn = gr.Button("Refresh 🔄", scale=2)
            leaderboard_html = gr.HTML("Loading Leaderboard...")
            # Initial render shows a progress indicator while data loads.
            demo.load(
                fn=get_latest_leaderboard_html,
                inputs=[dataset_dropdown, sort_dropdown],
                outputs=leaderboard_html,
                show_progress="minimal",
            )
            # Any control change or explicit refresh re-renders the same way;
            # register the identical handler on each trigger in one pass.
            for trigger in (
                dataset_dropdown.change,
                sort_dropdown.change,
                refresh_btn.click,
            ):
                trigger(
                    fn=get_latest_leaderboard_html,
                    inputs=[dataset_dropdown, sort_dropdown],
                    outputs=leaderboard_html,
                )
        # --- Submission tab ---
        with gr.TabItem("📝 Submit Model"):
            model_id = gr.Textbox(
                label="Model ID", placeholder="facebook/wav2vec2-lv-60-espeak-cv-ft"
            )
            display_name = gr.Textbox(
                label="Submission Name", placeholder="Facebook Wav2Vec2 Espeak 60"
            )
            url = gr.Textbox(
                label="Github/Kaggle/HF URL (optional)",
                placeholder="https://github.com/username/repo",
            )
            submit_btn = gr.Button("Submit")
            result = gr.Textbox(label="Submission Status")
            submit_btn.click(
                fn=submit_evaluation,
                inputs=[model_id, display_name, url],
                outputs=result,
            )
        # --- Status tab ---
        with gr.TabItem("📊 Submission Status"):
            query = gr.Textbox(
                label="Model ID or Task ID",
                placeholder="Enter model ID (e.g., facebook/wav2vec2-lv-60-espeak-cv-ft)",
            )
            status_btn = gr.Button("Check Status")
            status_output = gr.JSON(label="Status")
            status_btn.click(fn=get_status, inputs=query, outputs=status_output)

if __name__ == "__main__":
    demo.launch()