# This is the main module that renders the Gradio leaderboard interface.
# Note: Gradio automatically creates REST API endpoints for the functions
# that are used as event handlers in the interface.
import gradio as gr
import pandas as pd
from tasks import start_eval_task, get_leaderboard_data, get_status
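
# Those auto-generated endpoints can be called with the `gradio_client` package.
# A minimal sketch (the api_name below assumes Gradio's default of naming
# endpoints after their handler functions):
#
#   from gradio_client import Client
#
#   client = Client("KoelLabs/IPA-Transcription-EN")
#   html = client.predict("PWED", api_name="/get_latest_leaderboard_html")
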
def get_latest_leaderboard_html(sort_option: str) -> str:
    try:
        # Get the latest leaderboard data
        df = get_leaderboard_data()

        # Sort the dataframe so the smallest PER or PWED is at the top
        sort_column = "average_per" if sort_option.lower() == "per" else "average_pwed"
        df = df.sort_values(by=sort_column, ascending=True)

        # Format the dataframe for HTML display
        df = pd.DataFrame(
            {
                "Model": df["model"],
                "Average PER ⬇️": df["average_per"].apply(lambda x: f"{x:.4f}"),
                "Average PWED ⬇️": df["average_pwed"].apply(lambda x: f"{x:.4f}"),
                "Link": df["github_url"].apply(
                    lambda x: (
                        f'<a href="{x}" target="_blank">Repository</a>' if x else "N/A"
                    )
                ),
                "Submission Date": pd.to_datetime(df["submission_date"]).dt.strftime(
                    "%Y-%m-%d"
                ),
            }
        )
        return df.to_html(escape=False, index=False, classes="styled-table")
    except Exception as e:
        print(f"Error updating leaderboard: {e}")
        return "Error updating leaderboard"


def submit_evaluation(model_name: str, submission_name: str, github_url: str) -> str:
    if not model_name or not submission_name:
        return "⚠️ Please provide both model name and submission name."
    try:
        task_id = start_eval_task(model_name, submission_name, github_url)
        return f"✅ Evaluation submitted successfully! Task ID: {task_id}"
    except Exception as e:
        return f"❌ Error: {str(e)}"


with gr.Blocks(
    css="""
    .styled-table {
        width: 100%;
        border-collapse: collapse;
        margin: 25px 0;
        font-size: 0.9em;
        font-family: sans-serif;
        box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
    }
    .styled-table thead tr {
        background: linear-gradient(45deg, #092746, #073562, #0A648F);
    }
    .styled-table th {
        color: white;
    }
    .styled-table th,
    .styled-table td {
        padding: 12px 15px;
    }
    .styled-table tbody tr {
        border-bottom: 1px solid #dddddd;
    }
    """
) as demo:
gr.Markdown("# π― English Phonemic Transcription Leaderboard")
gr.Markdown("#### Developed By: [Koel Labs](https://koellabs.com)")
gr.Markdown(
"""
## Explanation of Metrics
- **PER (Phoneme Error Rate)**: The Levenshtein distance calculated between phoneme sequences of the predicted and actual transcriptions.
- **PWED (Phoneme Weighted Edit Distance)**: Edit distance between the predicted and actual phoneme sequences, weighted by the phonemic feature distance. Method by the [panphon library](https://github.com/dmort27/panphon)
Read more about evaluations on [our blog](https://www.koellabs.com/blog/phonemic-transcription-metrics)
"""
)
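
    # A rough sketch of how the two metrics above could be computed. This is
    # illustrative only: the real evaluation runs in the task started by
    # start_eval_task, and the calls below assume panphon's documented
    # Distance API. `pred` and `ref` are hypothetical IPA strings.
    #
    #   from panphon.distance import Distance
    #
    #   pred, ref = "fənɛɾɪk", "fənɛtɪk"  # predicted vs. actual phonemes
    #   dst = Distance()
    #   per = dst.fast_levenshtein_distance(pred, ref) / len(ref)  # PER
    #   pwed = dst.weighted_feature_edit_distance(pred, ref)       # PWED
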
    gr.Markdown(
        """
    ## Test Set Information

    The test set used for evaluation comes from the [TIMIT speech corpus](https://www.kaggle.com/datasets/mfekadu/darpa-timit-acousticphonetic-continuous-speech), a widely used dataset for speech recognition research.

    ## Compute

    This leaderboard runs on the free basic plan (16GB RAM, 2 vCPUs) to keep results reproducible. Evaluation may take several hours to complete; please be patient and do not submit the same model multiple times.

    ## Contributing, Questions, and Feedback

    Please read the [README.md](https://huggingface.co/spaces/KoelLabs/IPA-Transcription-EN/blob/main/README.md) for more information on how to contribute, ask questions, or provide feedback.
    """
    )

    with gr.Tabs():
        with gr.TabItem("🏆 Leaderboard"):
            with gr.Row(elem_classes="controls-row"):
                # Controls side by side
                sort_dropdown = gr.Dropdown(
                    choices=["PWED", "PER"],
                    value="PWED",
                    interactive=True,
                    scale=2,
                    container=False,  # Removes the box around the dropdown
                    label=None,  # Removes the "Sort by" label
                )
                refresh_btn = gr.Button("Refresh 🔄", scale=2)  # Simplified button text

            leaderboard_html = gr.HTML(get_latest_leaderboard_html(sort_dropdown.value))

            sort_dropdown.change(
                fn=get_latest_leaderboard_html,
                inputs=[sort_dropdown],
                outputs=leaderboard_html,
            )
            refresh_btn.click(
                fn=get_latest_leaderboard_html,
                inputs=[sort_dropdown],
                outputs=leaderboard_html,
            )
        with gr.TabItem("🚀 Submit Model"):
            model_name = gr.Textbox(
                label="Model Name", placeholder="facebook/wav2vec2-lv-60-espeak-cv-ft"
            )
            submission_name = gr.Textbox(
                label="Submission Name", placeholder="My Model v1.0"
            )
            github_url = gr.Textbox(
                label="Github/Kaggle/HF URL (optional)",
                placeholder="https://github.com/username/repo",
            )
            submit_btn = gr.Button("Submit")
            result = gr.Textbox(label="Submission Status")

            submit_btn.click(
                fn=submit_evaluation,
                inputs=[model_name, submission_name, github_url],
                outputs=result,
            )
        with gr.TabItem("📈 Model Status"):
            query = gr.Textbox(
                label="Model Name or Task ID",
                placeholder="Enter model name (e.g., facebook/wav2vec2-lv-60-espeak-cv-ft)",
            )
            status_btn = gr.Button("Check Status")
            status_output = gr.JSON(label="Status")

            status_btn.click(fn=get_status, inputs=query, outputs=status_output)


if __name__ == "__main__":
    demo.launch()