Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
shigeki Ishida
committed on
Commit
·
bbf76cb
1
Parent(s):
91d92c5
fix vllm version name and code
Browse files
- src/display/utils.py +10 -0
- src/envs.py +0 -15
- src/submission/submit.py +6 -16
src/display/utils.py
CHANGED
|
@@ -170,6 +170,16 @@ class Backend(Enum):
|
|
| 170 |
return Backend.Unknown
|
| 171 |
|
| 172 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
# Column selection
|
| 174 |
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
|
| 175 |
TYPES = [c.type for c in fields(AutoEvalColumn)]
|
|
|
|
| 170 |
return Backend.Unknown
|
| 171 |
|
| 172 |
|
| 173 |
+
class VllmVersion(Enum):
|
| 174 |
+
current = ModelDetails("v0.6.3post1")
|
| 175 |
+
Unknown = ModelDetails("?")
|
| 176 |
+
|
| 177 |
+
def from_str(version):
|
| 178 |
+
if version == "v0.6.3post1":
|
| 179 |
+
return VllmVersion.current
|
| 180 |
+
return VllmVersion.Unknown
|
| 181 |
+
|
| 182 |
+
|
| 183 |
# Column selection
|
| 184 |
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
|
| 185 |
TYPES = [c.type for c in fields(AutoEvalColumn)]
|
src/envs.py
CHANGED
|
@@ -1,6 +1,5 @@
|
|
| 1 |
import os
|
| 2 |
|
| 3 |
-
import requests
|
| 4 |
from huggingface_hub import HfApi
|
| 5 |
|
| 6 |
# Info to change for your repository
|
|
@@ -23,18 +22,4 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
|
|
| 23 |
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
|
| 24 |
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
|
| 25 |
|
| 26 |
-
|
| 27 |
-
# vllm version
|
| 28 |
-
def get_latest_vllm_version() -> str:
|
| 29 |
-
"""GitHubからvllmの最新バージョンを取得する"""
|
| 30 |
-
url = "https://api.github.com/repos/vllm-project/vllm/releases/latest"
|
| 31 |
-
response = requests.get(url)
|
| 32 |
-
if response.status_code == 200:
|
| 33 |
-
latest_release = response.json()
|
| 34 |
-
return latest_release["tag_name"]
|
| 35 |
-
return None # APIリクエストが失敗した場合はNoneを返す
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
VLLM_CURRENT_VERSION = get_latest_vllm_version()
|
| 39 |
-
|
| 40 |
API = HfApi(token=TOKEN)
|
|
|
|
| 1 |
import os
|
| 2 |
|
|
|
|
| 3 |
from huggingface_hub import HfApi
|
| 4 |
|
| 5 |
# Info to change for your repository
|
|
|
|
| 22 |
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
|
| 23 |
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
API = HfApi(token=TOKEN)
|
src/submission/submit.py
CHANGED
|
@@ -3,8 +3,8 @@ import os
|
|
| 3 |
from datetime import datetime, timezone
|
| 4 |
|
| 5 |
from src.display.formatting import styled_error, styled_message, styled_warning
|
| 6 |
-
from src.display.utils import Version
|
| 7 |
-
from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
|
| 8 |
from src.submission.check_validity import already_submitted_models, check_model_card, get_model_size, is_model_on_hub
|
| 9 |
|
| 10 |
REQUESTED_MODELS = None
|
|
@@ -24,14 +24,12 @@ def add_new_eval(
|
|
| 24 |
REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
|
| 25 |
|
| 26 |
current_version = Version.v1_4_1.value.name
|
| 27 |
-
current_vllm_version =
|
| 28 |
|
| 29 |
# バージョン情報を含めた重複チェック
|
| 30 |
-
submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}
|
| 31 |
if submission_id in REQUESTED_MODELS:
|
| 32 |
-
return styled_warning(
|
| 33 |
-
f"This model has already been evaluated with llm-jp-eval version {current_version} and vllm version {current_vllm_version}"
|
| 34 |
-
)
|
| 35 |
|
| 36 |
user_name = ""
|
| 37 |
model_path = model
|
|
@@ -50,14 +48,7 @@ def add_new_eval(
|
|
| 50 |
revision = "main"
|
| 51 |
|
| 52 |
# Is the model on the hub?
|
| 53 |
-
model_on_hub, error,
|
| 54 |
-
model_name=model, revision=revision, token=TOKEN, test_tokenizer=True
|
| 55 |
-
)
|
| 56 |
-
architecture = "?"
|
| 57 |
-
if model_config is not None:
|
| 58 |
-
architectures = getattr(model_config, "architectures", None)
|
| 59 |
-
if architectures:
|
| 60 |
-
architecture = ";".join(architectures)
|
| 61 |
if not model_on_hub:
|
| 62 |
return styled_error(f'Model "{model}" {error}')
|
| 63 |
|
|
@@ -95,7 +86,6 @@ def add_new_eval(
|
|
| 95 |
"private": False,
|
| 96 |
"add_special_tokens": add_special_tokens,
|
| 97 |
"llm_jp_eval_version": current_version,
|
| 98 |
-
"architecture": architecture,
|
| 99 |
"vllm_version": current_vllm_version,
|
| 100 |
}
|
| 101 |
|
|
|
|
| 3 |
from datetime import datetime, timezone
|
| 4 |
|
| 5 |
from src.display.formatting import styled_error, styled_message, styled_warning
|
| 6 |
+
from src.display.utils import Version, VllmVersion
|
| 7 |
+
from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
|
| 8 |
from src.submission.check_validity import already_submitted_models, check_model_card, get_model_size, is_model_on_hub
|
| 9 |
|
| 10 |
REQUESTED_MODELS = None
|
|
|
|
| 24 |
REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
|
| 25 |
|
| 26 |
current_version = Version.v1_4_1.value.name
|
| 27 |
+
current_vllm_version = VllmVersion.current.value.name
|
| 28 |
|
| 29 |
# バージョン情報を含めた重複チェック
|
| 30 |
+
submission_id = f"{model}_{precision}_{add_special_tokens}_{current_version}"
|
| 31 |
if submission_id in REQUESTED_MODELS:
|
| 32 |
+
return styled_warning(f"This model has already been evaluated with llm-jp-eval version {current_version}")
|
|
|
|
|
|
|
| 33 |
|
| 34 |
user_name = ""
|
| 35 |
model_path = model
|
|
|
|
| 48 |
revision = "main"
|
| 49 |
|
| 50 |
# Is the model on the hub?
|
| 51 |
+
model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
if not model_on_hub:
|
| 53 |
return styled_error(f'Model "{model}" {error}')
|
| 54 |
|
|
|
|
| 86 |
"private": False,
|
| 87 |
"add_special_tokens": add_special_tokens,
|
| 88 |
"llm_jp_eval_version": current_version,
|
|
|
|
| 89 |
"vllm_version": current_vllm_version,
|
| 90 |
}
|
| 91 |
|