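# Gradio app for the LiveBench leaderboard Space: downloads evaluation results
# from the Hugging Face Hub, renders them as a filterable, sortable table, and
# periodically restarts itself to pick up fresh data.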
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

# from fastchat.serve.monitor.monitor import build_leaderboard_tab, build_basic_stats_tab, basic_component_values, leader_component_values
from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    fields,
)
from src.envs import (
    API,
    EVAL_DETAILED_RESULTS_PATH,
    EVAL_RESULTS_PATH,
    EVAL_DETAILED_RESULTS_REPO,
    REPO_ID,
    RESULTS_REPO,
    TOKEN,
)
from src.populate import get_leaderboard_df
def restart_space():
    API.restart_space(repo_id=REPO_ID)
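# restart_space serves both as the fallback when a results download fails and
# as the job run on a 30-minute schedule (see the BackgroundScheduler at the
# bottom of the file).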
### Space initialisation
try:
    print(EVAL_DETAILED_RESULTS_REPO)
    snapshot_download(
        repo_id=EVAL_DETAILED_RESULTS_REPO,
        local_dir=EVAL_DETAILED_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()

try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO,
        local_dir=EVAL_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()
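# get_leaderboard_df returns a dict mapping dataset-subset names to pandas
# DataFrames; init_leaderboard below iterates over its keys.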
LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
def init_leaderboard(dataframes):
    subsets = list(dataframes.keys())
    with gr.Row():
        selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
        research_textbox = gr.Textbox(placeholder="🔍 Search Models... [press enter]", label="Filter Models by Name")
        selected_columns = gr.CheckboxGroup(
            choices=[c.name for c in fields(AutoEvalColumn) if not c.hidden],
            label="Select Columns to Display",
            value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
        )
    data = dataframes[subsets[-1]]
    with gr.Row():
        datatype = [c.type for c in fields(AutoEvalColumn)]
        df = gr.Dataframe(data, datatype=datatype, type="pandas")

    def refresh(subset, search_term, selected_columns):
        # Re-fetch the latest results and update the dict in place so the
        # update_data closure below sees the fresh data.
        global LEADERBOARD_DF
        LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
        dataframes.clear()
        dataframes.update(LEADERBOARD_DF)
        return update_data(subset, search_term, selected_columns)

    def update_data(subset, search_term, selected_columns):
        print("Subset:", subset)
        print("Search Term:", search_term)
        print("Selected Columns:", selected_columns)
        # Filter rows by model name (literal match, case-insensitive).
        filtered_data = dataframes[subset][
            dataframes[subset]["Model Name"].str.contains(search_term, case=False, regex=False)
        ]
        # Keep the canonical column order defined by AutoEvalColumn.
        selected_columns = [c.name for c in fields(AutoEvalColumn) if c.name in selected_columns]
        selected_data = filtered_data[selected_columns].sort_values(by="Total", ascending=False)
        return gr.Dataframe(
            selected_data,
            type="pandas",
            datatype=[c.type for c in fields(AutoEvalColumn) if c.name in selected_columns],
        )
    with gr.Row():
        refresh_button = gr.Button("Refresh")

    refresh_button.click(
        refresh,
        inputs=[selected_subset, research_textbox, selected_columns],
        outputs=df,
        concurrency_limit=20,
    )
    selected_subset.change(update_data, inputs=[selected_subset, research_textbox, selected_columns], outputs=df)
    research_textbox.submit(update_data, inputs=[selected_subset, research_textbox, selected_columns], outputs=df)
    selected_columns.change(update_data, inputs=[selected_subset, research_textbox, selected_columns], outputs=df)
    # Alternative: render with the gradio_leaderboard component instead of the
    # manual Dropdown/Textbox/CheckboxGroup wiring above.
    # return Leaderboard(
    #     value=dataframes,
    #     datatype=[c.type for c in fields(AutoEvalColumn)],
    #     select_columns=SelectColumns(
    #         default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
    #         cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
    #         label="Select Columns to Display:",
    #     ),
    #     search_columns=[AutoEvalColumn.model.name],
    #     hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
    #     filter_columns=[
    #         ColumnFilter(
    #             column=AutoEvalColumn.dataset_version.name,
    #             choices=subsets,
    #             default=subsets[-1],
    #         )
    #         # gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
    #     ],
    #     interactive=False,
    # )
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏆 LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
            init_leaderboard(LEADERBOARD_DF)

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
    # with gr.Row():
    #     with gr.Accordion("📙 Citation", open=False):
    #         citation_button = gr.Textbox(
    #             value=CITATION_BUTTON_TEXT,
    #             label=CITATION_BUTTON_LABEL,
    #             lines=20,
    #             elem_id="citation-button",
    #             show_copy_button=True,
    #         )
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()
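# To run locally (a sketch, assuming this file is named app.py, the src/
# package is present, and a valid Hub token is configured in src/envs.py):
#   pip install gradio gradio_leaderboard apscheduler huggingface_hub pandas
#   python app.py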