Commit 15dd199 by oliver-aizip
Parent: 347797e

refresh and persistent logging
Files changed (3):
  1. app.py +7 -14
  2. utils/leaderboard.py +17 -3
  3. utils/vote_logger.py +19 -10
app.py CHANGED
@@ -406,36 +406,29 @@ The Elo rating system provides a more accurate ranking than simple win rates:
         outputs=[results_table_display]
     )
 
-    # Alternative approach: use two separate clicks for each button
-    # First click event: Update UI immediately
+    # Use a single event chain for each button, structured to update UI first, then run inference
     for btn in [random_question_btn, try_another_btn]:
-        # Handle UI updates first
-        event1 = btn.click(
+        btn.click(
+            # Step 1: Show loading state immediately
             fn=show_loading_state,
             inputs=[],
             outputs=[summary_a_display, summary_b_display, vote_button_a,
                      vote_button_b, vote_button_tie, vote_button_neither]
         ).then(
+            # Step 2: Get new example
             fn=handle_new_example_click,
             inputs=[],
             outputs=[current_example]
         ).then(
+            # Step 3: Update context UI immediately
             fn=update_ui_for_new_context,
             inputs=[current_example],
             outputs=[query_display, context_description, context_display,
                      context_toggle_btn, show_full_context]
-        )
-
-    # Second click event for each button runs in parallel with the first
-    for btn in [random_question_btn, try_another_btn]:
-        # Generate model outputs (potentially slower operation)
-        event2 = btn.click(
-            fn=handle_new_example_click,  # This will be called separately from the first event
-            inputs=[],
-            outputs=[current_example]
         ).then(
+            # Step 4: Then process example for model outputs
             fn=process_example,
-            inputs=[current_example],
+            inputs=[current_example],
             outputs=[model_a_name, model_b_name, summary_a_text, summary_b_text,
                      selected_winner, feedback_list, show_results_state, results_agg,
                      summary_a_display, summary_b_display, vote_button_a, vote_button_b,
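For reference, the single-chain pattern this commit moves to can be sketched in isolation: each .then() step starts only after the previous one returns, so the cheap loading-state update reaches the browser before the slow inference step runs. The sketch below is a minimal, self-contained illustration under assumed names (show_loading, pick_example, run_models and the component names are placeholders, not the app's own), and it assumes a recent Gradio release where Button.click(...).then(...) chaining is available.

# Minimal sketch of chaining fast UI updates ahead of slow inference with
# Button.click().then() -- placeholder functions, not the app's real ones.
import random
import time

import gradio as gr

EXAMPLES = ["What is Elo?", "Summarize the context.", "Why chain events?"]

def show_loading():
    # Step 1: instant feedback while the heavy work is still pending
    return "Loading...", "Loading..."

def pick_example():
    # Step 2: cheap state update (choose a new question)
    return random.choice(EXAMPLES)

def run_models(question):
    # Step 3: slow step; runs only after the loading state is already visible
    time.sleep(2)  # stand-in for model inference
    return f"Model A answer to: {question}", f"Model B answer to: {question}"

with gr.Blocks() as demo:
    question = gr.Textbox(label="Question", interactive=False)
    out_a = gr.Textbox(label="Summary A")
    out_b = gr.Textbox(label="Summary B")
    new_btn = gr.Button("Random question")

    # One event chain per button: UI first, then state, then inference.
    new_btn.click(
        fn=show_loading, inputs=[], outputs=[out_a, out_b]
    ).then(
        fn=pick_example, inputs=[], outputs=[question]
    ).then(
        fn=run_models, inputs=[question], outputs=[out_a, out_b]
    )

if __name__ == "__main__":
    demo.launch()
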
utils/leaderboard.py CHANGED
@@ -3,6 +3,7 @@ import pandas as pd
 import math
 from datetime import datetime
 from .models import models
+from huggingface_hub import CommitScheduler
 
 # Default K-factor (determines how much a single match affects ratings)
 DEFAULT_K_FACTOR = 32
@@ -10,6 +11,18 @@ DEFAULT_K_FACTOR = 32
 # Default starting Elo
 DEFAULT_ELO = 1500
 
+LEADERBOARD_FN = './arena_elo_leaderboard.csv'
+
+csv_path = os.path.join('utils', 'arena_elo_leaderboard.csv')
+
+leaderboard_scheduler = CommitScheduler(
+    repo_id="aizip-dev/Arena-Metadata",
+    folder_path="leaderboard",
+    repo_type="dataset",
+    every=5
+)
+
+
 def prepare_url(model_dict: dict):
     """
     Prepare the URL for the model based on its name.
@@ -119,7 +132,7 @@ def load_leaderboard_data():
 
     try:
         # Define the path to the CSV file for leaderboard
-        csv_path = os.path.join('utils', 'arena_elo_leaderboard.csv')
+        csv_path = LEADERBOARD_FN
 
         # Check if the file exists and load it
         if os.path.exists(csv_path):
@@ -247,8 +260,9 @@ def save_leaderboard_data(results):
         df = df.sort_values(by='elo', ascending=False)
 
         # Save to CSV
-        df.to_csv(csv_path, index=False)
-        print(f"Leaderboard data saved successfully to {csv_path}")
+        with leaderboard_scheduler.lock:
+            df.to_csv(csv_path, index=False)
+            print(f"Leaderboard data saved successfully to {csv_path}")
     except Exception as e:
         print(f"Error saving leaderboard data: {e}")
 
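The "persistent logging" half of the commit relies on huggingface_hub's CommitScheduler, which pushes the contents of a local folder to a Hub dataset repo from a background thread on a fixed interval (only files under folder_path are uploaded), while scheduler.lock lets writers keep a scheduled commit from picking up a half-written file. Below is a minimal, self-contained sketch of that pattern; the repo id, folder, and file names are placeholders, and it assumes a write-enabled HF token is available in the environment.

# Minimal sketch of CommitScheduler-backed persistence. Repo id and paths
# below are placeholders; a write-enabled HF token is assumed.
from pathlib import Path

import pandas as pd
from huggingface_hub import CommitScheduler

LEADERBOARD_DIR = Path("leaderboard")
LEADERBOARD_DIR.mkdir(parents=True, exist_ok=True)
LEADERBOARD_CSV = LEADERBOARD_DIR / "arena_elo_leaderboard.csv"

# Background thread: commits everything under folder_path to the dataset
# repo every 5 minutes (the "every" argument is in minutes).
scheduler = CommitScheduler(
    repo_id="your-org/arena-metadata",  # placeholder repo
    repo_type="dataset",
    folder_path=LEADERBOARD_DIR,
    every=5,
)

def save_leaderboard(df: pd.DataFrame) -> None:
    # Hold the scheduler's lock while writing so a scheduled push
    # never uploads a partially written CSV.
    with scheduler.lock:
        df.to_csv(LEADERBOARD_CSV, index=False)

if __name__ == "__main__":
    save_leaderboard(pd.DataFrame({"model": ["model-a", "model-b"], "elo": [1510, 1490]}))

Note that the scheduler only watches folder_path, so this sketch writes the CSV inside that folder.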
 
utils/vote_logger.py CHANGED
@@ -3,6 +3,14 @@ import csv
3
  import json
4
  from datetime import datetime
5
  import pandas as pd
 
 
 
 
 
 
 
 
6
 
7
  def save_vote_details(example, model_a, model_b, winner, feedback, summary_a, summary_b):
8
  """
@@ -35,17 +43,18 @@ def save_vote_details(example, model_a, model_b, winner, feedback, summary_a, su
35
 
36
  try:
37
  # Open the file in append mode
38
- with open(csv_path, 'a', newline='', encoding='utf-8') as f:
39
- writer = csv.DictWriter(f, fieldnames=vote_record.keys())
40
-
41
- # Write header if file doesn't exist
42
- if not file_exists:
43
- writer.writeheader()
44
-
45
- # Write the vote record
46
- writer.writerow(vote_record)
 
47
 
48
- print(f"Vote details saved to {csv_path}")
49
  except Exception as e:
50
  print(f"Error saving vote details: {e}")
51
 
 
3
  import json
4
  from datetime import datetime
5
  import pandas as pd
6
+ from huggingface_hub import CommitScheduler
7
+
8
+ vote_details_scheduler = CommitScheduler(
9
+ repo_id="aizip-dev/Arena-Metadata",
10
+ folder_path="votes",
11
+ repo_type="dataset",
12
+ every=5
13
+ )
14
 
15
  def save_vote_details(example, model_a, model_b, winner, feedback, summary_a, summary_b):
16
  """
 
43
 
44
  try:
45
  # Open the file in append mode
46
+ with vote_details_scheduler:
47
+ with open(csv_path, 'a', newline='', encoding='utf-8') as f:
48
+ writer = csv.DictWriter(f, fieldnames=vote_record.keys())
49
+
50
+ # Write header if file doesn't exist
51
+ if not file_exists:
52
+ writer.writeheader()
53
+
54
+ # Write the vote record
55
+ writer.writerow(vote_record)
56
 
57
+ print(f"Vote details saved to {csv_path}")
58
  except Exception as e:
59
  print(f"Error saving vote details: {e}")
60
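The vote log applies the same scheduler idea to an append-only CSV. The pattern huggingface_hub documents for append-style logs is to hold scheduler.lock around the whole append so a background commit never sees a half-written row; the sketch below follows that documented pattern with placeholder names (log_vote, the votes folder, the field names, and the repo id are illustrative, not the repo's own).

# Minimal sketch of append-only vote logging guarded by the scheduler's lock.
# Repo id, folder, and field names are placeholders.
import csv
from datetime import datetime
from pathlib import Path

from huggingface_hub import CommitScheduler

VOTES_DIR = Path("votes")
VOTES_DIR.mkdir(parents=True, exist_ok=True)
VOTES_CSV = VOTES_DIR / "vote_details.csv"

scheduler = CommitScheduler(
    repo_id="your-org/arena-metadata",  # placeholder repo
    repo_type="dataset",
    folder_path=VOTES_DIR,
    every=5,  # minutes between background commits
)

def log_vote(model_a: str, model_b: str, winner: str, feedback: str) -> None:
    record = {
        "timestamp": datetime.now().isoformat(),
        "model_a": model_a,
        "model_b": model_b,
        "winner": winner,
        "feedback": feedback,
    }
    # Take the lock so a scheduled commit cannot upload a half-written row.
    with scheduler.lock:
        file_exists = VOTES_CSV.exists()
        with open(VOTES_CSV, "a", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=record.keys())
            if not file_exists:
                writer.writeheader()
            writer.writerow(record)

if __name__ == "__main__":
    log_vote("model-a", "model-b", "model-a", "A was more faithful to the context.")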