Spaces: Running on CPU Upgrade

Clean up
app.py CHANGED
@@ -70,6 +70,18 @@ except Exception:
     restart_space()
 
 
+# Get dataframes
+
+(
+    FINISHED_EVAL_QUEUE_DF,
+    RUNNING_EVAL_QUEUE_DF,
+    PENDING_EVAL_QUEUE_DF,
+    FAILED_EVAL_QUEUE_DF,
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+
+ORIGINAL_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+
+
 # Searching and filtering
 
 
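This first hunk hoists data loading to import time: the four queue dataframes and the raw leaderboard dataframe become module-level constants (uppercase by convention) that every UI block below reads. A minimal self-contained sketch of the pattern, with a stand-in loader in place of the Space's real get_evaluation_queue_df (the fake data and column names are illustrative, not from the app):

import pandas as pd

# Stand-in loader (assumed behavior): one dataframe per evaluation status.
def get_evaluation_queue_df(requests_path, cols):
    # The real helper reads request files from `requests_path`; here we fake it.
    all_reqs = pd.DataFrame(
        {"model": ["org/a", "org/b", "org/c"], "status": ["FINISHED", "PENDING", "FAILED"]}
    )
    return tuple(
        all_reqs.loc[all_reqs["status"] == s, cols].reset_index(drop=True)
        for s in ("FINISHED", "RUNNING", "PENDING", "FAILED")
    )

# Runs once at import; the four constants are then shared read-only below.
(
    FINISHED_EVAL_QUEUE_DF,
    RUNNING_EVAL_QUEUE_DF,
    PENDING_EVAL_QUEUE_DF,
    FAILED_EVAL_QUEUE_DF,
) = get_evaluation_queue_df("eval-queue", ["model", "status"])

print(len(PENDING_EVAL_QUEUE_DF))  # 1

The trade-off is freshness: the constants reflect repository state at startup and only refresh when restart_space() reruns the app.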
@@ -232,15 +244,8 @@ def load_query(request: gr.Request): # triggered only once at startup => read q
 
 # Prepare the dataframes
 
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-    failed_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
 
-
-leaderboard_df = original_df.copy()
+leaderboard_df = ORIGINAL_DF.copy()
 leaderboard_df = filter_models(
     leaderboard_df,
     [t.to_str(" : ") for t in ModelType],
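With ORIGINAL_DF now shared across the module, the .copy() in this hunk is what keeps the later filter_models / select_columns passes from aliasing the frame other components display. A small sketch of the distinction in plain pandas:

import pandas as pd

ORIGINAL_DF = pd.DataFrame({"model": ["org/a", "org/b"], "#Params (B)": [7.0, 70.0]})

# Work on a copy: filtering the working frame leaves the shared one intact.
leaderboard_df = ORIGINAL_DF.copy()
leaderboard_df = leaderboard_df[leaderboard_df["#Params (B)"] < 10.0]

assert len(leaderboard_df) == 1 and len(ORIGINAL_DF) == 2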
@@ -257,7 +262,7 @@ INITIAL_COLUMNS = ["T"] + [
 ]
 leaderboard_df = select_columns(leaderboard_df, INITIAL_COLUMNS)
 
-MAX_MODEL_SIZE =
+MAX_MODEL_SIZE = ORIGINAL_DF["#Params (B)"].max()
 
 
 # Leaderboard demo
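MAX_MODEL_SIZE is now derived from the loaded data rather than set by hand, presumably to feed a size filter elsewhere in the app; the computation is just a column maximum. Sketch with stand-in data:

import pandas as pd

ORIGINAL_DF = pd.DataFrame({"#Params (B)": [1.3, 7.0, 70.0]})
MAX_MODEL_SIZE = ORIGINAL_DF["#Params (B)"].max()
print(MAX_MODEL_SIZE)  # 70.0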
@@ -495,7 +500,7 @@ with gr.Blocks() as demo_leaderboard:
 
     # Dummy leaderboard for handling the case when the user uses backspace key
     hidden_leaderboard_table_for_search = gr.Dataframe(
-        value=
+        value=ORIGINAL_DF[COLS],
         headers=COLS,
         datatype=TYPES,
         visible=False,
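The hidden table gives the search handler a complete, unfiltered copy of the leaderboard to fall back on when the query shrinks (the backspace case the comment mentions), now seeded from ORIGINAL_DF. A sketch of an invisible gr.Dataframe acting as a data source, with stand-in data and columns:

import gradio as gr
import pandas as pd

ORIGINAL_DF = pd.DataFrame({"model": ["org/a", "org/b"], "score": [71.2, 68.9]})
COLS = ["model", "score"]

with gr.Blocks() as demo:
    # Never rendered, but event handlers can still take it as an input,
    # so searches always start from the full dataset.
    hidden_leaderboard_table_for_search = gr.Dataframe(
        value=ORIGINAL_DF[COLS],
        headers=COLS,
        visible=False,
    )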
@@ -581,46 +586,46 @@ with gr.Blocks() as demo_submission:
 
     with gr.Column():
         with gr.Accordion(
-            f"✅ Finished Evaluations ({len(
+            f"✅ Finished Evaluations ({len(FINISHED_EVAL_QUEUE_DF)})",
             open=False,
         ):
             with gr.Row():
                 finished_eval_table = gr.Dataframe(
-                    value=
+                    value=FINISHED_EVAL_QUEUE_DF,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
                     row_count=5,
                 )
         with gr.Accordion(
-            f"🔄 Running Evaluation Queue ({len(
+            f"🔄 Running Evaluation Queue ({len(RUNNING_EVAL_QUEUE_DF)})",
             open=False,
         ):
             with gr.Row():
                 running_eval_table = gr.Dataframe(
-                    value=
+                    value=RUNNING_EVAL_QUEUE_DF,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
                     row_count=5,
                 )
 
         with gr.Accordion(
-            f"⏳ Pending Evaluation Queue ({len(
+            f"⏳ Pending Evaluation Queue ({len(PENDING_EVAL_QUEUE_DF)})",
             open=False,
         ):
             with gr.Row():
                 pending_eval_table = gr.Dataframe(
-                    value=
+                    value=PENDING_EVAL_QUEUE_DF,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
                     row_count=5,
                 )
         with gr.Accordion(
-            f"❌ Failed Evaluation Queue ({len(
+            f"❌ Failed Evaluation Queue ({len(FAILED_EVAL_QUEUE_DF)})",
             open=False,
         ):
             with gr.Row():
                 failed_eval_table = gr.Dataframe(
-                    value=
+                    value=FAILED_EVAL_QUEUE_DF,
                     headers=EVAL_COLS,
                     datatype=EVAL_TYPES,
                     row_count=5,
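The submission page repeats one pattern four times: each accordion label bakes the queue length into its title, and the table inside is seeded with the matching dataframe. A minimal sketch of one such accordion, assuming a stand-in dataframe (because len(...) runs at build time, the displayed counts stay fixed until the Space restarts):

import gradio as gr
import pandas as pd

PENDING_EVAL_QUEUE_DF = pd.DataFrame({"model": ["org/model-a"], "status": ["PENDING"]})
EVAL_COLS = list(PENDING_EVAL_QUEUE_DF.columns)

with gr.Blocks() as demo_submission:
    with gr.Accordion(
        f"⏳ Pending Evaluation Queue ({len(PENDING_EVAL_QUEUE_DF)})",  # count fixed at build time
        open=False,
    ):
        with gr.Row():
            pending_eval_table = gr.Dataframe(
                value=PENDING_EVAL_QUEUE_DF,
                headers=EVAL_COLS,
                row_count=5,
            )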