output to json
- app.py +9 -4
- src/populate.py +3 -2
app.py
CHANGED
@@ -43,14 +43,14 @@ def launch_backend():
     _ = subprocess.run(["python", "main_backend.py"])
 
 try:
-    print(EVAL_REQUESTS_PATH)
+    # print(EVAL_REQUESTS_PATH)
     snapshot_download(
         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
     )
 except Exception:
     restart_space()
 try:
-    print(EVAL_RESULTS_PATH)
+    # print(EVAL_RESULTS_PATH)
     snapshot_download(
         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
     )

@@ -162,8 +162,8 @@ def filter_models(
     type_emoji = [t[0] for t in type_query]
     filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
     filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
-    print(df[AutoEvalColumn.n_shot.name])
-    print(nshot_query)
+    # print(df[AutoEvalColumn.n_shot.name])
+    # print(nshot_query)
     filtered_df = filtered_df.loc[df[AutoEvalColumn.n_shot.name].isin(nshot_query + ["None"])]
 
     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))

@@ -403,12 +403,17 @@ with demo:
                 show_copy_button=True,
             )
             csv = gr.File(interactive=False, value="output.csv", visible=False)
+            json = gr.File(interactive=False, value="all_data.json", visible=False)
 
 
 
             def update_visibility(radio):
                 return gr.File(interactive=False, value="output.csv", visible=True)
+            def update_visibility_json(radio):
+                return gr.File(interactive=False, value="all_data.json", visible=True)
+
             deleted_models_visibility.change(update_visibility, deleted_models_visibility, csv)
+            deleted_models_visibility.change(update_visibility_json, deleted_models_visibility, json)
 
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
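For readers unfamiliar with the pattern in the last hunk: a hidden gr.File is revealed by returning a new gr.File with visible=True from a .change handler. A minimal self-contained sketch of that pattern, assuming "output.csv" and "all_data.json" already exist on disk and using an illustrative checkbox in place of the Space's deleted_models_visibility widget:

import gradio as gr

with gr.Blocks() as sketch:
    # Stand-in for the Space's deleted_models_visibility control (hypothetical)
    toggle = gr.Checkbox(label="Show download files")
    csv = gr.File(interactive=False, value="output.csv", visible=False)
    json_file = gr.File(interactive=False, value="all_data.json", visible=False)

    def show_csv(radio):
        # Returning a component instance updates the bound output in place
        return gr.File(interactive=False, value="output.csv", visible=True)

    def show_json(radio):
        return gr.File(interactive=False, value="all_data.json", visible=True)

    # One .change() registration per output component, mirroring the diff
    toggle.change(show_csv, toggle, csv)
    toggle.change(show_json, toggle, json_file)

Note that, as in the commit, the handlers ignore their input argument and always return visible=True, so once revealed the files stay visible regardless of the widget's state.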
src/populate.py
CHANGED
@@ -12,13 +12,14 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     metadata=json.load(open(f"{requests_path}/metadata.json"))
     raw_data = get_raw_eval_results(results_path, requests_path, metadata)
     all_data_json = [v.to_dict() for v in raw_data]
-    print(all_data_json)
+    # print(all_data_json)
+    json.dump(all_data_json, open("all_data.json", "w"), indent=2, ensure_ascii=False)
     df = pd.DataFrame.from_records(all_data_json)
     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
     df = df[cols].round(decimals=2)
 
     # filter out if any of the benchmarks have not been produced
-    df2 = df[has_no_nan_values(df, benchmark_cols)]
+    #df2 = df[has_no_nan_values(df, benchmark_cols)]
     return raw_data, df
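One caveat with the new export line: json.dump(all_data_json, open("all_data.json", "w"), ...) never closes the file handle, leaving that to the garbage collector. A sketch of the same export with explicit file handling; the helper name is an assumption for illustration, not part of the commit:

import json

def dump_leaderboard_json(all_data_json, path="all_data.json"):
    # Hypothetical helper; same output as the commit's one-liner:
    # pretty-printed, with ensure_ascii=False so non-ASCII model
    # names are written as-is rather than \u escapes.
    with open(path, "w", encoding="utf-8") as f:
        json.dump(all_data_json, f, indent=2, ensure_ascii=False)

This is the file the app.py change then serves via the hidden gr.File(value="all_data.json") component.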