from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("lid", "acc", "LID")
    task1 = Task("topic_classification", "acc", "TC")
    task2 = Task("rc_qa", "acc", "RC-QA")
    task3 = Task("nli", "acc", "NLI")
    task4 = Task("machine_translation_xx_eng", "chrf", "MT (xx-en)")
    task5 = Task("machine_translation_eng_xx", "chrf", "MT (en-xx)")


class SpeechTasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("lid", "acc", "LID")
    task1 = Task("topic_classification", "acc", "TC")
    task2 = Task("rc_qa", "acc", "RC-QA")
    task3 = Task("asr", "cer", "ASR")
    task4 = Task("s2tt", "chrf", "S2TT")
NUM_FEWSHOT = 0  # Change with your few shot
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">mSTEB Leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard presents the results of evaluating models on the mSTEB benchmark.
"""
# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
## Reproducibility
To reproduce our results, please see the GitHub repository for mSTEB:
https://github.com/McGill-NLP/mSTEB
"""
EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It is likely your model has been improperly uploaded.

Note: make sure your model is public!

Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
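For example, a minimal sketch of the conversion (assuming a standard `transformers` checkpoint; the model name and output path are placeholders to replace with your own):
```python
from transformers import AutoModel

model = AutoModel.from_pretrained("your model name")
# Re-save the weights using safetensors serialization
model.save_pretrained("your-model-safetensors", safe_serialization=True)
```
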
### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill up your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the EleutherAI Harness on your model locally (you can add `--limit` to limit the number of examples per task).
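A local smoke test might look roughly like this (a sketch using the harness's Python API; the model name and task list below are placeholders, and the exact tasks depend on your setup):
```python
import lm_eval

# Quick local check on a handful of examples per task
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=your model name",
    tasks=["hellaswag"],  # placeholder: replace with the tasks you care about
    limit=10,             # same effect as --limit on the command line
)
print(results["results"])
```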
""" | |
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{beyene2025mstebmassivelymultilingualevaluation,
  title         = {mSTEB: Massively Multilingual Evaluation of LLMs on Speech and Text Tasks},
  author        = {Luel Hagos Beyene and Vivek Verma and Min Ma and Jesujoba O. Alabi and Fabian David Schmidt and Joyce Nakatumba-Nabende and David Ifeoluwa Adelani},
  year          = {2025},
  eprint        = {2506.08400},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CL},
  url           = {https://arxiv.org/abs/2506.08400}
}
"""