CodeGoat24 committed · Commit fd1a72a · verified · 1 Parent(s): 960d9a7

Delete populate.py

Files changed (1):
  1. populate.py +0 -117
populate.py DELETED
@@ -1,117 +0,0 @@
import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn, ModelType, Precision, WeightType
from src.leaderboard.read_evals import get_raw_eval_results
from src.about import Tasks


def load_csv_results():
    """Load results from main-results.csv file"""
    csv_path = "main-results.csv"
    if not os.path.exists(csv_path):
        return []

    df = pd.read_csv(csv_path)
    results = []

    for _, row in df.iterrows():
        # Parse parameters - handle different formats
        param_str = str(row['Param'])
        if 'activated' in param_str:
            # Extract the activated parameter count (e.g., "2.8B activated (16B total)")
            param_value = float(param_str.split('B')[0])
        elif 'B' in param_str:
            # Simple format (e.g., "9B")
            param_value = float(param_str.replace('B', ''))
        else:
            param_value = 0

        # Convert CSV data to the format expected by the leaderboard
        data_dict = {
            AutoEvalColumn.model.name: make_clickable_model(row['Model']),
            AutoEvalColumn.average.name: row['ACC'],  # Using ACC as the average score
            AutoEvalColumn.params.name: param_value,
            AutoEvalColumn.license.name: "Open Source" if row['Open Source?'] == 'Yes' else "Proprietary",
            AutoEvalColumn.model_type.name: ModelType.FT.value.name,  # Default to fine-tuned
            AutoEvalColumn.precision.name: Precision.float16.value.name,  # Default precision
            AutoEvalColumn.weight_type.name: WeightType.Original.value.name,
            AutoEvalColumn.architecture.name: "Unknown",
            AutoEvalColumn.still_on_hub.name: True,
            AutoEvalColumn.revision.name: "",
            AutoEvalColumn.likes.name: 0,
            AutoEvalColumn.model_type_symbol.name: ModelType.FT.value.symbol,
        }

        # Add task-specific scores (required by the leaderboard)
        for task in Tasks:
            data_dict[task.name] = row['ACC']  # Use the same ACC score for all tasks

        results.append(data_dict)

    return results

def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    # If no JSON data found, try loading from CSV
    if not all_data_json:
        all_data_json = load_csv_results()

    if not all_data_json:
        # Return empty dataframe if no data found
        return pd.DataFrame(columns=cols)

    df = pd.DataFrame.from_records(all_data_json)

    # Only include columns that exist in the dataframe
    existing_cols = [col for col in cols if col in df.columns]

    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df[existing_cols].round(decimals=2)

    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    return df

def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
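
For context, a minimal sketch of how these two helpers are typically wired into the leaderboard app in the stock Hugging Face leaderboard template; the module paths and constants below (src.envs, EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS, EVAL_COLS) are assumptions from that template, not confirmed by this commit.

# Hypothetical call sites for the deleted helpers; the imported constants and
# module paths are assumed from the standard leaderboard template, not this diff.
from populate import get_evaluation_queue_df, get_leaderboard_df
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS
from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH

# Main leaderboard table: JSON eval results first, with main-results.csv as fallback.
leaderboard_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)

# Evaluation queue tabs: finished, running, and pending requests.
finished_df, running_df, pending_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)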