Upload folder using huggingface_hub
app/pages.py  CHANGED  (+36 -20)
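
The commit title above is the stock message that `huggingface_hub` attaches to folder uploads when no custom commit message is given. As a rough sketch of how a commit like this one is typically produced (the local path and the Space id below are placeholders, not taken from this repository):

```python
# Minimal sketch: push a local app folder to a Hugging Face Space.
# "./app" and "your-username/your-space" are illustrative placeholders.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login` (or HF_TOKEN)
api.upload_folder(
    folder_path="./app",                 # local folder containing pages.py and other app files
    repo_id="your-username/your-space",  # target Space repository
    repo_type="space",
    # commit_message is omitted, so the default
    # "Upload folder using huggingface_hub" is used.
)
```

The diff recorded by that upload is shown below.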
@@ -37,26 +37,38 @@ def dashboard():
     [][gh]
     """)

-    audio_url = "https://arxiv.org/abs/2406.16020"

-    st.markdown("#### News")
     st.markdown("""
-
-
-
-
+        ### Changelog
+
+        - **Dec 13, 2024**:
+          - Singlish ASR task added! The datasets are available on [HF](https://huggingface.co/datasets/MERaLiON/MNSC).
+
+        - **Dec 12, 2024**:
+          - Updated layout and added support for comparison between models with similar sizes.
+          - Reorganized layout for a better user experience.
+          - Added a performance summary for each task.
+
+        - **Aug 2024**:
+          - Initial leaderboard is now online.
+        """)

     st.divider()

-    st.markdown("
-
-
-
-
+    st.markdown("""
+        #### What is [AudioBench](https://arxiv.org/abs/2406.16020)?
+
+        - AudioBench is a comprehensive evaluation benchmark designed for general instruction-following audio large language models.
+        - AudioBench is an evaluation benchmark that we consistently put effort into updating and maintaining.
+
+        Below are the initial 26 datasets included in AudioBench. We have since extended the benchmark to over 40 datasets and will keep adding more in the future.
+        """
+    )
+

     with st.container():
-        left_co, center_co, right_co = st.columns([0.5,
-        with
+        left_co, center_co, right_co = st.columns([1, 0.5, 0.5])
+        with left_co:
             st.image("./style/audio_overview.png",
                      caption="Overview of the datasets in AudioBench.",
                      )
@@ -68,24 +80,28 @@ def dashboard():

     st.markdown("###### :dart: Our Benchmark includes: ")
     cols = st.columns(10)
-    cols[
-    cols[
-    cols[
+    cols[0].metric(label="Tasks", value=">8")
+    cols[1].metric(label="Datasets", value=">40")
+    cols[2].metric(label="Evaluated Models", value=">5")


     st.divider()
     with st.container():
-        st.
+        left_co, center_co, right_co = st.columns([1, 0.5, 0.5])

-
-
+        with left_co:
+            st.markdown("""
+                ##### Citations :round_pushpin:
+                ```
                 @article{wang2024audiobench,
                 title={AudioBench: A Universal Benchmark for Audio Large Language Models},
                 author={Wang, Bin and Zou, Xunlong and Lin, Geyu and Sun, Shuo and Liu, Zhuohan and Zhang, Wenyu and Liu, Zhengyuan and Aw, AiTi and Chen, Nancy F},
                 journal={arXiv preprint arXiv:2406.16020},
                 year={2024}
                 }
-
+                ```
+                """)
+

 def asr():
     st.title("Task: Automatic Speech Recognition")
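
For readers who want to preview the layout this change introduces without running the full leaderboard app, here is a minimal, self-contained sketch. It mirrors the metric row and the ratio-based columns from the diff; the file name is hypothetical and the image call is omitted so the snippet runs on its own.

```python
# preview_dashboard.py -- minimal sketch of the layout added in this change.
# Hypothetical file name; run with: streamlit run preview_dashboard.py
import streamlit as st

st.markdown("###### :dart: Our Benchmark includes: ")

# st.columns(10) creates ten equal-width columns; only the first three hold
# metrics, which keeps the tiles compact instead of stretching across the page.
cols = st.columns(10)
cols[0].metric(label="Tasks", value=">8")
cols[1].metric(label="Datasets", value=">40")
cols[2].metric(label="Evaluated Models", value=">5")

st.divider()

with st.container():
    # A [1, 0.5, 0.5] ratio gives the left column half of the total width,
    # so the citation block does not span the whole page.
    left_co, center_co, right_co = st.columns([1, 0.5, 0.5])
    with left_co:
        st.markdown("##### Citations :round_pushpin:")
```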