Commit · 65af9f4
1 Parent(s): 12c08d3
Update app.py
app.py CHANGED
@@ -297,7 +297,7 @@ with demo:
     )
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM Benchmark
+        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             leaderboard_table_lite = gr.components.Dataframe(
                 value=leaderboard_df[COLS_LITE],
                 headers=COLS_LITE,
@@ -319,7 +319,7 @@ with demo:
                 leaderboard_table_lite,
             )
 
-        with gr.TabItem("
+        with gr.TabItem("📈 Extended model view", elem_id="llm-benchmark-tab-table", id=1):
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df,
                 headers=COLS,
@@ -341,16 +341,16 @@ with demo:
                 [hidden_leaderboard_table_for_search, search_bar],
                 leaderboard_table,
             )
-        with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2):
+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("
+        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
                 with gr.Column():
-                    with gr.Accordion(f"✅ Finished Evaluations
+                    with gr.Accordion(f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", open=False):
                         with gr.Row():
                             finished_eval_table = gr.components.Dataframe(
                                 value=finished_eval_queue_df,
@@ -358,7 +358,7 @@ with demo:
                                 datatype=EVAL_TYPES,
                                 max_rows=5,
                             )
-                    with gr.Accordion(f"🔄 Running Evaluation Queue
+                    with gr.Accordion(f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})", open=False):
                         with gr.Row():
                             running_eval_table = gr.components.Dataframe(
                                 value=running_eval_queue_df,
@@ -367,7 +367,7 @@ with demo:
                                 max_rows=5,
                             )
 
-                    with gr.Accordion(f"⏳ Pending Evaluation Queue
+                    with gr.Accordion(f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})", open=False):
                         with gr.Row():
                             pending_eval_table = gr.components.Dataframe(
                                 value=pending_eval_queue_df,
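For reference, below is a minimal, self-contained sketch of the Gradio layout these changed lines build: a tab bar whose tab labels carry emoji, and accordions whose titles embed live queue counts via f-strings. The stand-in DataFrames, the COLS_LITE subset, and the omission of app-specific wiring (search bar, hidden tables, max_rows) are assumptions made so the snippet runs on its own; it is not the Space's actual code.

# Sketch of the tab/accordion structure after this commit.
# The data below is fabricated filler, not the Space's real leaderboard.
import gradio as gr
import pandas as pd

leaderboard_df = pd.DataFrame(
    {"Model": ["model-a", "model-b"], "Average": [61.2, 58.7], "ARC": [59.0, 55.3]}
)
COLS_LITE = ["Model", "Average"]  # hypothetical subset of leaderboard columns
finished_eval_queue_df = pd.DataFrame({"model": ["model-a"], "status": ["FINISHED"]})
pending_eval_queue_df = pd.DataFrame({"model": ["model-c"], "status": ["PENDING"]})

with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            # Lite view: only the columns listed in COLS_LITE.
            gr.Dataframe(value=leaderboard_df[COLS_LITE], headers=COLS_LITE)
        with gr.TabItem("📈 Extended model view", elem_id="llm-benchmark-tab-table", id=1):
            # Full view: every column of the leaderboard.
            gr.Dataframe(value=leaderboard_df, headers=list(leaderboard_df.columns))
        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
            with gr.Column():
                # Accordion titles embed queue sizes at build time,
                # mirroring the f-strings in the changed lines above.
                with gr.Accordion(f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", open=False):
                    with gr.Row():
                        gr.Dataframe(value=finished_eval_queue_df)
                with gr.Accordion(f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})", open=False):
                    with gr.Row():
                        gr.Dataframe(value=pending_eval_queue_df)

if __name__ == "__main__":
    demo.launch()

One consequence of this design worth noting: because the counts are interpolated when the Blocks layout is constructed, the accordion titles reflect the queues as of app startup and do not update until the Space restarts or the layout is rebuilt.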