Commit b28a580 (verified) · 0 parent(s)

Duplicate from demo-leaderboard-backend/leaderboard

Co-authored-by: Clémentine Fourrier <[email protected]>
- .gitattributes +35 -0
- .gitignore +13 -0
- .pre-commit-config.yaml +53 -0
- Makefile +13 -0
- README.md +44 -0
- app.py +204 -0
- pyproject.toml +13 -0
- requirements.txt +16 -0
- src/about.py +72 -0
- src/display/css_html_js.py +105 -0
- src/display/formatting.py +27 -0
- src/display/utils.py +110 -0
- src/envs.py +25 -0
- src/leaderboard/read_evals.py +196 -0
- src/populate.py +58 -0
- src/submission/check_validity.py +99 -0
- src/submission/submit.py +119 -0
    	
.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
    	
.gitignore ADDED
@@ -0,0 +1,13 @@
+auto_evals/
+venv/
+__pycache__/
+.env
+.ipynb_checkpoints
+*ipynb
+.vscode/
+
+eval-queue/
+eval-results/
+eval-queue-bk/
+eval-results-bk/
+logs/
    	
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+# Copyright (c) 2022, NVIDIA CORPORATION.  All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+default_language_version:
+  python: python3
+
+ci:
+  autofix_prs: true
+  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+  autoupdate_schedule: quarterly
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: check-yaml
+      - id: check-case-conflict
+      - id: detect-private-key
+      - id: check-added-large-files
+        args: ['--maxkb=1000']
+      - id: requirements-txt-fixer
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+        name: Format imports
+
+  - repo: https://github.com/psf/black
+    rev: 22.12.0
+    hooks:
+      - id: black
+        name: Format code
+        additional_dependencies: ['click==8.0.2']
+
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    # Ruff version.
+    rev: 'v0.0.267'
+    hooks:
+      - id: ruff
    	
Makefile ADDED
@@ -0,0 +1,13 @@
+.PHONY: style quality
+
+
+style:
+	python -m black --line-length 119 .
+	python -m isort .
+	ruff check --fix .
+
+
+quality:
+	python -m black --check --line-length 119 .
+	python -m isort --check-only .
+	ruff check .
    	
README.md ADDED
@@ -0,0 +1,44 @@
+---
+title: Demo Leaderboard
+emoji: 🥇
+colorFrom: green
+colorTo: indigo
+sdk: gradio
+app_file: app.py
+pinned: true
+license: apache-2.0
+---
+
+# Start the configuration
+
+Most of the variables to change for a default leaderboard are in `src/envs.py` (replace the path for your leaderboard) and `src/about.py` (for the tasks).
+
+Results files should have the following format and be stored as JSON files:
+```json
+{
+    "config": {
+        "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
+        "model_name": "path of the model on the hub: org/model",
+        "model_sha": "revision on the hub",
+    },
+    "results": {
+        "task_name": {
+            "metric_name": score,
+        },
+        "task_name2": {
+            "metric_name": score,
+        }
+    }
+}
+```
+
+Request files are created automatically by this tool.
+
+If you encounter a problem on the space, don't hesitate to restart it to remove the created eval-queue, eval-queue-bk, eval-results and eval-results-bk folders.
+
+# Code logic for more complex edits
+
+You'll find:
+- the main table's column names and properties in `src/display/utils.py`
+- the logic to read all results and request files, then convert them into dataframe lines, in `src/leaderboard/read_evals.py` and `src/populate.py`
+- the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
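For illustration, here is a minimal sketch of producing one results file in the format the README describes. The org/model name, revision, scores and output path are placeholder assumptions; the exact folder layout expected by `src/leaderboard/read_evals.py` is not spelled out in the README, so treat the path as hypothetical.

```python
# Hypothetical example: write one results file matching the documented format.
# "demo-org/demo-model", the sha, the scores and the output path are placeholders.
import json
from pathlib import Path

results = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "demo-org/demo-model",  # path of the model on the hub: org/model
        "model_sha": "0123456789abcdef",      # revision on the hub
    },
    "results": {
        "anli_r1": {"acc": 0.45},             # task_name -> {metric_name: score}
        "logiqa": {"acc_norm": 0.32},
    },
}

out_dir = Path("eval-results/demo-org")       # assumed layout: one folder per org
out_dir.mkdir(parents=True, exist_ok=True)
(out_dir / "results_demo-model.json").write_text(json.dumps(results, indent=2))
```

The task and metric keys (`anli_r1`/`acc`, `logiqa`/`acc_norm`) are the ones declared in `src/about.py` below.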
    	
app.py ADDED
@@ -0,0 +1,204 @@
+import gradio as gr
+from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
+import pandas as pd
+from apscheduler.schedulers.background import BackgroundScheduler
+from huggingface_hub import snapshot_download
+
+from src.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    TITLE,
+)
+from src.display.css_html_js import custom_css
+from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    AutoEvalColumn,
+    ModelType,
+    fields,
+    WeightType,
+    Precision
+)
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
+from src.populate import get_evaluation_queue_df, get_leaderboard_df
+from src.submission.submit import add_new_eval
+
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID)
+
+### Space initialisation
+try:
+    print(EVAL_REQUESTS_PATH)
+    snapshot_download(
+        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+try:
+    print(EVAL_RESULTS_PATH)
+    snapshot_download(
+        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+
+
+LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+
+(
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+
+def init_leaderboard(dataframe):
+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")
+    return Leaderboard(
+        value=dataframe,
+        datatype=[c.type for c in fields(AutoEvalColumn)],
+        select_columns=SelectColumns(
+            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+            label="Select Columns to Display:",
+        ),
+        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
+        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        filter_columns=[
+            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
+            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            ColumnFilter(
+                AutoEvalColumn.params.name,
+                type="slider",
+                min=0.01,
+                max=150,
+                label="Select the number of parameters (B)",
+            ),
+            ColumnFilter(
+                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
+            ),
+        ],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
+
+
+demo = gr.Blocks(css=custom_css)
+with demo:
+    gr.HTML(TITLE)
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+            leaderboard = init_leaderboard(LEADERBOARD_DF)
+
+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+            with gr.Column():
+                with gr.Row():
+                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                with gr.Column():
+                    with gr.Accordion(
+                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            finished_eval_table = gr.components.Dataframe(
+                                value=finished_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+                    with gr.Accordion(
+                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            running_eval_table = gr.components.Dataframe(
+                                value=running_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+
+                    with gr.Accordion(
+                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            pending_eval_table = gr.components.Dataframe(
+                                value=pending_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+            with gr.Row():
+                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+            with gr.Row():
+                with gr.Column():
+                    model_name_textbox = gr.Textbox(label="Model name")
+                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    model_type = gr.Dropdown(
+                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                        label="Model type",
+                        multiselect=False,
+                        value=None,
+                        interactive=True,
+                    )
+
+                with gr.Column():
+                    precision = gr.Dropdown(
+                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                        label="Precision",
+                        multiselect=False,
+                        value="float16",
+                        interactive=True,
+                    )
+                    weight_type = gr.Dropdown(
+                        choices=[i.value.name for i in WeightType],
+                        label="Weights type",
+                        multiselect=False,
+                        value="Original",
+                        interactive=True,
+                    )
+                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+            submit_button = gr.Button("Submit Eval")
+            submission_result = gr.Markdown()
+            submit_button.click(
+                add_new_eval,
+                [
+                    model_name_textbox,
+                    base_model_name_textbox,
+                    revision_name_textbox,
+                    precision,
+                    weight_type,
+                    model_type,
+                ],
+                submission_result,
+            )
+
+    with gr.Row():
+        with gr.Accordion("📙 Citation", open=False):
+            citation_button = gr.Textbox(
+                value=CITATION_BUTTON_TEXT,
+                label=CITATION_BUTTON_LABEL,
+                lines=20,
+                elem_id="citation-button",
+                show_copy_button=True,
+            )
+
+scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", seconds=1800)
+scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()
    	
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+[tool.ruff]
+# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+select = ["E", "F"]
+ignore = ["E501"] # line too long (black is taking care of this)
+line-length = 119
+fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+[tool.isort]
+profile = "black"
+line_length = 119
+
+[tool.black]
+line-length = 119
    	
requirements.txt ADDED
@@ -0,0 +1,16 @@
+APScheduler
+black
+datasets
+gradio
+gradio[oauth]
+gradio_leaderboard==0.0.9
+gradio_client
+huggingface-hub>=0.18.0
+matplotlib
+numpy
+pandas
+python-dateutil
+tqdm
+transformers
+tokenizers>=0.15.0
+sentencepiece
    	
src/about.py ADDED
@@ -0,0 +1,72 @@
+from dataclasses import dataclass
+from enum import Enum
+
+@dataclass
+class Task:
+    benchmark: str
+    metric: str
+    col_name: str
+
+
+# Select your tasks here
+# ---------------------------------------------------
+class Tasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    task0 = Task("anli_r1", "acc", "ANLI")
+    task1 = Task("logiqa", "acc_norm", "LogiQA")
+
+NUM_FEWSHOT = 0 # Change with your few shot
+# ---------------------------------------------------
+
+
+
+# Your leaderboard name
+TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+
+# What does your leaderboard evaluate?
+INTRODUCTION_TEXT = """
+Intro text
+"""
+
+# Which evaluations are you running? how can people reproduce what you have?
+LLM_BENCHMARKS_TEXT = f"""
+## How it works
+
+## Reproducibility
+To reproduce our results, here are the commands you can run:
+
+"""
+
+EVALUATION_QUEUE_TEXT = """
+## Some good practices before submitting a model
+
+### 1) Make sure you can load your model and tokenizer using AutoClasses:
+```python
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+config = AutoConfig.from_pretrained("your model name", revision=revision)
+model = AutoModel.from_pretrained("your model name", revision=revision)
+tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+```
+If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+Note: make sure your model is public!
+Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
+
+### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+### 3) Make sure your model has an open license!
+This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+
+### 4) Fill up your model card
+When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+## In case of model failure
+If your model is displayed in the `FAILED` category, its execution stopped.
+Make sure you have followed the above steps first.
+If everything is done, check that you can launch the EleutherAI harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""
+"""
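As the comments in `src/about.py` indicate, each `Task` ties together a task key in the results JSON (`benchmark`), a metric key (`metric`), and the column name shown on the leaderboard (`col_name`). A minimal sketch of extending the enum with one more task; the `hellaswag` / `acc_norm` / `HellaSwag` values are placeholders and are not part of this commit:

```python
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str   # task_key in the results json
    metric: str      # metric_key in the results json
    col_name: str    # column name displayed on the leaderboard


# Hypothetical extension: the third entry is a placeholder, not part of this commit.
class Tasks(Enum):
    task0 = Task("anli_r1", "acc", "ANLI")              # reads results["anli_r1"]["acc"]
    task1 = Task("logiqa", "acc_norm", "LogiQA")        # reads results["logiqa"]["acc_norm"]
    task2 = Task("hellaswag", "acc_norm", "HellaSwag")  # placeholder new task
```

Because `src/display/utils.py` below builds `AutoEvalColumn` by iterating over `Tasks`, adding a member like this would automatically add a matching score column to the main table.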
    	
src/display/css_html_js.py ADDED
@@ -0,0 +1,105 @@
+custom_css = """
+
+.markdown-text {
+    font-size: 16px !important;
+}
+
+#models-to-add-text {
+    font-size: 18px !important;
+}
+
+#citation-button span {
+    font-size: 16px !important;
+}
+
+#citation-button textarea {
+    font-size: 16px !important;
+}
+
+#citation-button > label > button {
+    margin: 6px;
+    transform: scale(1.3);
+}
+
+#leaderboard-table {
+    margin-top: 15px
+}
+
+#leaderboard-table-lite {
+    margin-top: 15px
+}
+
+#search-bar-table-box > div:first-child {
+    background: none;
+    border: none;
+}
+
+#search-bar {
+    padding: 0px;
+}
+
+/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+table td:first-child,
+table th:first-child {
+    max-width: 400px;
+    overflow: auto;
+    white-space: nowrap;
+}
+
+.tab-buttons button {
+    font-size: 20px;
+}
+
+#scale-logo {
+    border-style: none !important;
+    box-shadow: none;
+    display: block;
+    margin-left: auto;
+    margin-right: auto;
+    max-width: 600px;
+}
+
+#scale-logo .download {
+    display: none;
+}
+#filter_type{
+    border: 0;
+    padding-left: 0;
+    padding-top: 0;
+}
+#filter_type label {
+    display: flex;
+}
+#filter_type label > span{
+    margin-top: var(--spacing-lg);
+    margin-right: 0.5em;
+}
+#filter_type label > .wrap{
+    width: 103px;
+}
+#filter_type label > .wrap .wrap-inner{
+    padding: 2px;
+}
+#filter_type label > .wrap .wrap-inner input{
+    width: 1px
+}
+#filter-columns-type{
+    border:0;
+    padding:0.5;
+}
+#filter-columns-size{
+    border:0;
+    padding:0.5;
+}
+#box-filter > .form{
+    border: 0
+}
+"""
+
+get_window_url_params = """
+    function(url_params) {
+        const params = new URLSearchParams(window.location.search);
+        url_params = Object.fromEntries(params);
+        return url_params;
+    }
+    """
    	
src/display/formatting.py ADDED
@@ -0,0 +1,27 @@
+def model_hyperlink(link, model_name):
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+def make_clickable_model(model_name):
+    link = f"https://huggingface.co/{model_name}"
+    return model_hyperlink(link, model_name)
+
+
+def styled_error(error):
+    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+def styled_warning(warn):
+    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+def styled_message(message):
+    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+
+def has_no_nan_values(df, columns):
+    return df[columns].notna().all(axis=1)
+
+
+def has_nan_values(df, columns):
+    return df[columns].isna().any(axis=1)
    	
src/display/utils.py ADDED
@@ -0,0 +1,110 @@
+from dataclasses import dataclass, make_dataclass
+from enum import Enum
+
+import pandas as pd
+
+from src.about import Tasks
+
+def fields(raw_class):
+    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+
+# These classes are for user facing column names,
+# to avoid having to change them all around the code
+# when a modif is needed
+@dataclass
+class ColumnContent:
+    name: str
+    type: str
+    displayed_by_default: bool
+    hidden: bool = False
+    never_hidden: bool = False
+
+## Leaderboard columns
+auto_eval_column_dict = []
+# Init
+auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+#Scores
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+for task in Tasks:
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+# Model information
+auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+
+# We use make dataclass to dynamically fill the scores from Tasks
+AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
+
+## For the queue columns in the submission tab
+@dataclass(frozen=True)
+class EvalQueueColumn:  # Queue column
+    model = ColumnContent("model", "markdown", True)
+    revision = ColumnContent("revision", "str", True)
+    private = ColumnContent("private", "bool", True)
+    precision = ColumnContent("precision", "str", True)
+    weight_type = ColumnContent("weight_type", "str", "Original")
+    status = ColumnContent("status", "str", True)
+
+## All the model information that we might need
+@dataclass
+class ModelDetails:
+    name: str
+    display_name: str = ""
+    symbol: str = "" # emoji
+
+
+class ModelType(Enum):
+    PT = ModelDetails(name="pretrained", symbol="🟢")
+    FT = ModelDetails(name="fine-tuned", symbol="🔶")
+    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
+    RL = ModelDetails(name="RL-tuned", symbol="🟦")
+    Unknown = ModelDetails(name="", symbol="?")
+
+    def to_str(self, separator=" "):
+        return f"{self.value.symbol}{separator}{self.value.name}"
+
+    @staticmethod
+    def from_str(type):
+        if "fine-tuned" in type or "🔶" in type:
+            return ModelType.FT
+        if "pretrained" in type or "🟢" in type:
+            return ModelType.PT
+        if "RL-tuned" in type or "🟦" in type:
+            return ModelType.RL
+        if "instruction-tuned" in type or "⭕" in type:
+            return ModelType.IFT
+        return ModelType.Unknown
+
+class WeightType(Enum):
+    Adapter = ModelDetails("Adapter")
+    Original = ModelDetails("Original")
+    Delta = ModelDetails("Delta")
+
+class Precision(Enum):
+    float16 = ModelDetails("float16")
+    bfloat16 = ModelDetails("bfloat16")
+    Unknown = ModelDetails("?")
+
+    def from_str(precision):
+        if precision in ["torch.float16", "float16"]:
+            return Precision.float16
+        if precision in ["torch.bfloat16", "bfloat16"]:
+            return Precision.bfloat16
+        return Precision.Unknown
+
            # Column selection
         | 
| 104 | 
            +
            COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
         | 
| 105 | 
            +
             | 
| 106 | 
            +
            EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
         | 
| 107 | 
            +
            EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
         | 
| 108 | 
            +
             | 
| 109 | 
            +
            BENCHMARK_COLS = [t.value.col_name for t in Tasks]
         | 
| 110 | 
            +
             | 
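
Because make_dataclass attaches the column metadata above as plain class attributes, downstream code reads it straight off the generated class. Below is a minimal, self-contained sketch of that pattern (not part of the commit); the real ColumnContent class and the fields() helper are defined earlier in src/display/utils.py, so the versions here are assumptions for illustration only.

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:          # assumed minimal stand-in for the real class
    name: str                 # display name shown in the table header
    type: str                 # column datatype ("str", "number", "markdown", ...)
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

def fields(raw_class):
    # assumed stand-in for the helper used above: make_dataclass stores the
    # ColumnContent defaults as ordinary class attributes, so we can list them
    return [v for k, v in raw_class.__dict__.items() if not k.startswith("__")]

demo_columns = [
    ["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
    ["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)],
    ["revision", ColumnContent, ColumnContent("Model sha", "str", False, True)],
]
DemoColumn = make_dataclass("DemoColumn", demo_columns, frozen=True)

print([c.name for c in fields(DemoColumn) if not c.hidden])  # ['Model', 'Average ⬆️']
print(DemoColumn.model.name)                                 # 'Model'
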
    	
        src/envs.py
    ADDED
    
@@ -0,0 +1,25 @@
import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org

OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

API = HfApi(token=TOKEN)
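
Note that CACHE_PATH and the local queue/result paths are resolved when the module is imported, so any HF_HOME override has to happen first. A small usage sketch, not part of the commit; the scratch path is an assumption and the repo root is assumed to be on sys.path.

import os

os.environ["HF_HOME"] = "/tmp/leaderboard-cache"  # assumed writable scratch dir

from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, REPO_ID

print(REPO_ID)              # demo-leaderboard-backend/leaderboard
print(EVAL_REQUESTS_PATH)   # /tmp/leaderboard-cache/eval-queue
print(EVAL_RESULTS_PATH)    # /tmp/leaderboard-cache/eval-results
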
    	
        src/leaderboard/read_evals.py
    ADDED
    
@@ -0,0 +1,196 @@
import glob
import json
import math
import os
from dataclasses import dataclass

import dateutil
import numpy as np

from src.display.formatting import make_clickable_model
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
from src.submission.check_validity import is_model_on_hub


@dataclass
class EvalResult:
    """Represents one full evaluation. Built from a combination of the result and request file for a given run.
    """
    eval_name: str # org_model_precision (uid)
    full_model: str # org/model (path on hub)
    org: str
    model: str
    revision: str # commit hash, "" if main
    results: dict
    precision: Precision = Precision.Unknown
    model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
    weight_type: WeightType = WeightType.Original # Original or Adapter
    architecture: str = "Unknown"
    license: str = "?"
    likes: int = 0
    num_params: int = 0
    date: str = "" # submission date of request file
    still_on_hub: bool = False

    @classmethod
    def init_from_json_file(self, json_filepath):
        """Inits the result from the specific model result file"""
        with open(json_filepath) as fp:
            data = json.load(fp)

        config = data.get("config")

        # Precision
        precision = Precision.from_str(config.get("model_dtype"))

        # Get model and org
        org_and_model = config.get("model_name", config.get("model_args", None))
        org_and_model = org_and_model.split("/", 1)

        if len(org_and_model) == 1:
            org = None
            model = org_and_model[0]
            result_key = f"{model}_{precision.value.name}"
        else:
            org = org_and_model[0]
            model = org_and_model[1]
            result_key = f"{org}_{model}_{precision.value.name}"
        full_model = "/".join(org_and_model)

        still_on_hub, _, model_config = is_model_on_hub(
            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
        )
        architecture = "?"
        if model_config is not None:
            architectures = getattr(model_config, "architectures", None)
            if architectures:
                architecture = ";".join(architectures)

        # Extract results available in this file (some results are split in several files)
        results = {}
        for task in Tasks:
            task = task.value

            # We average all scores of a given metric (not all metrics are present in all files)
            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
            if accs.size == 0 or any([acc is None for acc in accs]):
                continue

            mean_acc = np.mean(accs) * 100.0
            results[task.benchmark] = mean_acc

        return self(
            eval_name=result_key,
            full_model=full_model,
            org=org,
            model=model,
            results=results,
            precision=precision,
            revision=config.get("model_sha", ""),
            still_on_hub=still_on_hub,
            architecture=architecture
        )

    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it"""
        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.model_type = ModelType.from_str(request.get("model_type", ""))
            self.weight_type = WeightType[request.get("weight_type", "Original")]
            self.license = request.get("license", "?")
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
        except Exception:
            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")

    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name,
            AutoEvalColumn.precision.name: self.precision.value.name,
            AutoEvalColumn.model_type.name: self.model_type.value.name,
            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
            AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
            AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average.name: average,
            AutoEvalColumn.license.name: self.license,
            AutoEvalColumn.likes.name: self.likes,
            AutoEvalColumn.params.name: self.num_params,
            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
        }

        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict


def get_request_file_for_model(requests_path, model_name, precision):
    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
        f"{model_name}_eval_request_*.json",
    )
    request_files = glob.glob(request_files)

    # Select correct request file (precision)
    request_file = ""
    request_files = sorted(request_files, reverse=True)
    for tmp_request_file in request_files:
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
            if (
                req_content["status"] in ["FINISHED"]
                and req_content["precision"] == precision.split(".")[-1]
            ):
                request_file = tmp_request_file
    return request_file


def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []

    for root, _, files in os.walk(results_path):
        # We should only have json files in model results
        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
            continue

        # Sort the files by date
        try:
            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

        for file in files:
            model_result_filepaths.append(os.path.join(root, file))

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path)

        # Store results of same eval together
        eval_name = eval_result.eval_name
        if eval_name in eval_results.keys():
            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict() # we test if the dict version is complete
            results.append(v)
        except KeyError:  # not all eval values present
            continue

    return results
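
For reference, the result-file layout that EvalResult.init_from_json_file expects can be read off the parsing code above: a "config" block with model_dtype, model_name and model_sha, plus a "results" mapping from benchmark key to per-metric scores. The sketch below is not part of the commit; the benchmark key and metric ("anli_r1" / "acc") are placeholders, since the real ones come from Tasks in src/about.py.

import json

# Hypothetical results file for a single run (values made up for illustration)
result_file = {
    "config": {
        "model_dtype": "torch.float16",       # parsed by Precision.from_str
        "model_name": "demo-org/demo-model",  # split into org / model
        "model_sha": "main",                  # stored as the revision
    },
    "results": {
        "anli_r1": {"acc": 0.5},              # averaged per benchmark, then * 100
    },
}

with open("/tmp/results_2024-01-01T00-00-00.000000.json", "w") as fp:
    json.dump(result_file, fp)

# init_from_json_file on this file would yield results == {"anli_r1": 50.0} for a
# Task whose benchmark/metric match. The matching request file is then looked up at
#   <requests_path>/demo-org/demo-model_eval_request_*.json  (status must be FINISHED)
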
    	
        src/populate.py
    ADDED
    
@@ -0,0 +1,58 @@
import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[cols].round(decimals=2)

    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df


def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
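
A wiring sketch for these helpers, roughly mirroring what app.py in this Space does (treat the exact wiring as an assumption). It assumes the results and requests datasets have already been synced to the local cache paths.

from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS
from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH
from src.populate import get_evaluation_queue_df, get_leaderboard_df

leaderboard_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
finished_df, running_df, pending_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

print(leaderboard_df.head())
print(len(pending_df), "pending submissions")
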
    	
        src/submission/check_validity.py
    ADDED
    
@@ -0,0 +1,99 @@
import json
import os
import re
from collections import defaultdict
from datetime import datetime, timedelta, timezone

import huggingface_hub
from huggingface_hub import ModelCard
from huggingface_hub.hf_api import ModelInfo
from transformers import AutoConfig
from transformers.models.auto.tokenization_auto import AutoTokenizer

def check_model_card(repo_id: str) -> tuple[bool, str]:
    """Checks if the model card and license exist and have been filled"""
    try:
        card = ModelCard.load(repo_id)
    except huggingface_hub.utils.EntryNotFoundError:
        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."

    # Enforce license metadata
    if card.data.license is None:
        if not ("license_name" in card.data and "license_link" in card.data):
            return False, (
                "License not found. Please add a license to your model card using the `license` metadata or a"
                " `license_name`/`license_link` pair."
            )

    # Enforce card content
    if len(card.text) < 200:
        return False, "Please add a description to your model card, it is too short."

    return True, ""

def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
    """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
    try:
        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
        if test_tokenizer:
            try:
                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
            except ValueError as e:
                return (
                    False,
                    f"uses a tokenizer which is not in a transformers release: {e}",
                    None
                )
            except Exception as e:
                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
        return True, None, config

    except ValueError:
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
            None
        )

    except Exception as e:
        return False, "was not found on hub!", None


def get_model_size(model_info: ModelInfo, precision: str):
    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
    try:
        model_size = round(model_info.safetensors["total"] / 1e9, 3)
    except (AttributeError, TypeError):
        return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py

    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
    model_size = size_factor * model_size
    return model_size

def get_model_arch(model_info: ModelInfo):
    """Gets the model architecture from the configuration"""
    return model_info.config.get("architectures", "Unknown")

def already_submitted_models(requested_models_dir: str) -> set[str]:
    """Gather a list of already submitted models to avoid duplicates"""
    depth = 1
    file_names = []
    users_to_submission_dates = defaultdict(list)

    for root, _, files in os.walk(requested_models_dir):
        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
        if current_depth == depth:
            for file in files:
                if not file.endswith(".json"):
                    continue
                with open(os.path.join(root, file), "r") as f:
                    info = json.load(f)
                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")

                    # Select organisation
                    if info["model"].count("/") == 0 or "submitted_time" not in info:
                        continue
                    organisation, _ = info["model"].split("/")
                    users_to_submission_dates[organisation].append(info["submitted_time"])

    return set(file_names), users_to_submission_dates
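
A quick usage sketch for the validity helpers (not part of the commit). On success is_model_on_hub returns the loaded config as its third element; on failure the second element is a message fragment that the submission flow turns into a styled error. The model id below is only an example.

from src.submission.check_validity import check_model_card, is_model_on_hub

ok, error, config = is_model_on_hub("openai-community/gpt2", revision="main", test_tokenizer=True)
print(ok, error)  # True None when both the config and tokenizer load

card_ok, card_error = check_model_card("openai-community/gpt2")
print(card_ok, card_error)
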
    	
        src/submission/submit.py
    ADDED
    
@@ -0,0 +1,119 @@
import json
import os
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    is_model_on_hub,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None

def add_new_eval(
    model: str,
    base_model: str,
    revision: str,
    precision: str,
    weight_type: str,
    model_type: str,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None or model_type == "":
        return styled_error("Please select a model type.")

    # Does the model actually exist?
    if revision == "":
        revision = "main"

    # Is the model on the hub?
    if weight_type in ["Delta", "Adapter"]:
        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not base_model_on_hub:
            return styled_error(f'Base model "{base_model}" {error}')

    if not weight_type == "Adapter":
        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
        if not model_on_hub:
            return styled_error(f'Model "{model}" {error}')

    # Is the model info correctly filled?
    try:
        model_info = API.model_info(repo_id=model, revision=revision)
    except Exception:
        return styled_error("Could not get your model information. Please fill it up properly.")

    model_size = get_model_size(model_info=model_info, precision=precision)

    # Were the model card and license filled?
    try:
        license = model_info.cardData["license"]
    except Exception:
        return styled_error("Please select a license for your model")

    modelcard_OK, error_msg = check_model_card(model)
    if not modelcard_OK:
        return styled_error(error_msg)

    # Seems good, creating the eval
    print("Adding new eval")

    eval_entry = {
        "model": model,
        "base_model": base_model,
        "revision": revision,
        "precision": precision,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "likes": model_info.likes,
        "params": model_size,
        "license": license,
        "private": False,
    }

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )
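
Finally, a hypothetical call sketch for add_new_eval, roughly what the submission form in app.py feeds it (the exact dropdown label strings are assumptions). Running it requires a valid HF_TOKEN with write access to the requests dataset, since the request file is uploaded at the end.

from src.submission.submit import add_new_eval

message = add_new_eval(
    model="demo-org/demo-model",    # assumed hub id
    base_model="",                  # only used for Delta/Adapter weight types
    revision="main",
    precision="float16",            # anything after the first space is stripped off
    weight_type="Original",
    model_type="🟢 : pretrained",    # assumed label; later mapped back via ModelType.from_str
)
print(message)  # success / warning / error string from the styled_* helpers
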