Remove unused imports
- app.py +1 -1
- src/backend/run_eval_suite_harness.py +2 -2
- src/backend/run_eval_suite_lighteval.py +2 -6
- src/backend/sort_queue.py +0 -1
- src/display/log_visualizer.py +0 -1
- src/logging.py +0 -1
app.py
CHANGED
@@ -24,7 +24,7 @@ logging.basicConfig(level=logging.INFO)
 logger = setup_logger(__name__)
 
 
-intro_md =
+intro_md = """
 # Intro
 This is a visual for the auto evaluator.
 """
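For context, `intro_md` is the Markdown string rendered at the top of the Space's UI. A minimal sketch of how such a string is typically wired into a Gradio app (the `gr.Blocks`/`gr.Markdown` usage below is an illustrative assumption, not copied from app.py):

import gradio as gr

intro_md = """
# Intro
This is a visual for the auto evaluator.
"""

# Render the intro text at the top of the page; the rest of the UI
# (log viewers, tables, etc.) would follow inside the same Blocks context.
with gr.Blocks() as demo:
    gr.Markdown(intro_md)

if __name__ == "__main__":
    demo.launch()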
src/backend/run_eval_suite_harness.py
CHANGED
@@ -3,10 +3,10 @@ import os
 import logging
 from datetime import datetime
 
-from lm_eval import
+from lm_eval import evaluator, utils
 from lm_eval.tasks import TaskManager
 
-from src.envs import
+from src.envs import API
 from src.backend.manage_requests import EvalRequest
 from src.logging import setup_logger
 
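The two imports kept here, `evaluator` and `utils`, are the usual entry points of lm-eval-harness. A rough sketch of how a backend like this one might drive them (the model, task names, and argument values are illustrative assumptions, not taken from run_eval_suite_harness.py):

from lm_eval import evaluator, utils
from lm_eval.tasks import TaskManager

task_manager = TaskManager()  # discovers the registered benchmark tasks

# Run the harness against a Hugging Face model; arguments are illustrative.
results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["hellaswag"],
    num_fewshot=0,
    batch_size=1,
    task_manager=task_manager,
)

print(utils.make_table(results))  # utils renders the summary table of metrics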
src/backend/run_eval_suite_lighteval.py
CHANGED
@@ -1,16 +1,12 @@
 import json
-import argparse
 import logging
-from datetime import datetime
 
-import lighteval
 from lighteval.logging.evaluation_tracker import EvaluationTracker
 from lighteval.models.model_config import InferenceEndpointModelConfig
 from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters
 
-from lighteval.main_accelerate import main, EnvConfig, create_model_config
 
-from src.envs import RESULTS_REPO
+from src.envs import RESULTS_REPO
 from src.backend.manage_requests import EvalRequest
 from src.logging import setup_logger
 
@@ -85,7 +81,7 @@ def run_evaluation(eval_request: EvalRequest, task_names: str, batch_size: int,
         dumped = json.dumps(results, indent=2)
         logger.info(dumped)
 
-    except Exception
+    except Exception:  # if eval failed, we force a cleanup
         pipeline.model.cleanup()
 
     return results
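The second hunk only restores the colon and adds a comment on the `except` line, but the surrounding pattern is worth spelling out: evaluation runs inside a `try`, and the inference endpoint is torn down whenever the run fails. A condensed sketch of that pattern (the `evaluate()` and `get_results()` method names are assumptions for illustration; only `pipeline.model.cleanup()` appears in the diff above):

import json
import logging

logger = logging.getLogger(__name__)

def run_with_cleanup(pipeline):
    """Illustrative skeleton: evaluate, log results, tear down on failure."""
    results = None
    try:
        pipeline.evaluate()                        # run the benchmark tasks
        results = pipeline.get_results()           # collect the metrics dict
        logger.info(json.dumps(results, indent=2))
    except Exception:  # if eval failed, we force a cleanup
        pipeline.model.cleanup()                   # shut down the inference endpoint
    return results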
src/backend/sort_queue.py
CHANGED
@@ -1,4 +1,3 @@
-import re
 from dataclasses import dataclass
 
 from huggingface_hub import HfApi
src/display/log_visualizer.py
CHANGED
@@ -1,5 +1,4 @@
 from io import StringIO
-from pathlib import Path
 
 from bs4 import BeautifulSoup
 from rich.console import Console
src/logging.py
CHANGED
@@ -1,4 +1,3 @@
-import sys
 from pathlib import Path
 
 proj_dir = Path(__file__).parents[1]
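The last three files each lose a single import that nothing references. Removals like these are usually found mechanically rather than by hand; a small sketch using pyflakes to scan the source tree for unused imports (pyflakes is an illustrative choice here, the repo does not declare a linter in this commit):

import sys
from pathlib import Path

from pyflakes.api import checkPath
from pyflakes.reporter import Reporter

# Report unused imports (and other pyflakes warnings) for every module under src/.
reporter = Reporter(sys.stdout, sys.stderr)
for path in Path("src").rglob("*.py"):
    checkPath(str(path), reporter=reporter)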