Update src/leaderboard/read_evals.py
src/leaderboard/read_evals.py
CHANGED
@@ -198,10 +198,11 @@ def get_request_file_for_model(requests_path, model_name, precision):
 def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
     model_result_filepaths = []
-    print(f"Checking results in: {results_path}")
-    print(f"Model result filepaths found: {model_result_filepaths}")
 
-    for root, _, files in os.walk(results_path):
+    for root, dirs, files in os.walk(results_path):
+        print(f"Current directory: {root}")
+        print(f"Subdirectories: {dirs}")
+        print(f"Files: {files}")
         # We should only have json files in model results
         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
             continue
@@ -214,15 +215,15 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
 
         for file in files:
             model_result_filepaths.append(os.path.join(root, file))
+
+    print(f"Model result filepaths found: {model_result_filepaths}")
 
     eval_results = {}
     for model_result_filepath in model_result_filepaths:
         print(f"Processing result file: {model_result_filepath}")
         # Creation of result
-        print(f"Eval result for {model_result_filepath}: {eval_result}")
         eval_result = EvalResult.init_from_json_file(model_result_filepath)
         eval_result.update_with_request_file(requests_path)
-        print(f"Updated eval result with request file: {eval_result}")
 
         # Store results of same eval together
         eval_name = eval_result.eval_name
@@ -230,9 +231,15 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
         else:
             eval_results[eval_name] = eval_result
+
+        # Debug: check the final data
+        if eval_result:
+            data_dict = eval_result.to_dict()
+            print(f"Final leaderboard entry: {data_dict}")
+        else:
+            print("No valid eval result was processed.")
 
         data_dict = eval_result.to_dict()
-        print(f"Final leaderboard entry: {data_dict}")
 
     results = []
     for v in eval_results.values():