rzanoli committed
Commit c996d40 · 1 Parent(s): 8886020

Small changes

src/leaderboard/read_evals.py CHANGED
@@ -1,95 +1,203 @@
- import glob
- import json
- import math
- import os
- from dataclasses import dataclass
- from src.display.formatting import make_clickable_model
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, FewShotType
- from src.submission.check_validity import is_model_on_hub
-
- @dataclass
- class EvalResult:
-     eval_name: str
-     full_model: str
-     org: str
-     model: str
-     revision: str
-     results: dict
-     average_CPS: str
-     fewshot: int
-     fewshot_type: FewShotType = FewShotType.Unknown
-     weight_type: WeightType = WeightType.Original
-     architecture: str = "Unknown"
-     license: str = "?"
-     likes: int = 0
-     num_params: int = 0
-     date: str = ""
-     still_on_hub: bool = False
-
-     @classmethod
-     def init_from_json_file(cls, json_filepath):
-         with open(json_filepath) as fp:
-             data = json.load(fp)
-
-         config = data.get("config")
-         average_CPS = f"{data.get('average_CPS'):.2f}"
-
-         num_fewshot = int(config.get("num_fewshot", 0))
-         fewshot_type = FewShotType.from_num_fewshot(num_fewshot)
-
-         model_type = ModelType.from_str(config.get("model_type")) if config.get("model_type") else None
-         num_params = math.ceil(config.get("num_params_billion", 0)) if config.get("num_params_billion") else 0
-
-         org_and_model = config.get("model_name", "").split("/", 1)
-         org, model = (org_and_model if len(org_and_model) == 2 else (None, org_and_model[0]))
-
-         full_model = "/".join([org, model] if org else [model])
-         still_on_hub, _, model_config = is_model_on_hub(full_model, config.get("model_sha", "main"))
-
-         architecture = ";".join(getattr(model_config, "architectures", [])) if model_config else "?"
-
-         results = {
-             task.value.benchmark: f"{data['tasks'].get(task.value.benchmark, {}).get(task.metric_type, 0):.2f}"
-             for task in Tasks
-         }
-
-         return cls(
-             eval_name=f"{model}_{num_fewshot}",
-             full_model=full_model,
-             org=org,
-             model=model,
-             results=results,
-             average_CPS=average_CPS,
-             fewshot=fewshot_type,
-             fewshot_type=fewshot_type,
-             revision=config.get("model_sha", ""),
-             still_on_hub=still_on_hub,
-             architecture=architecture,
-             num_params=num_params
-         )
-
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
-     model_result_filepaths = [
-         os.path.join(root, file)
-         for root, _, files in os.walk(results_path)
-         for file in sorted(files, key=lambda x: x.split("_")[-1], reverse=True) if file.endswith(".json")
-     ]
-
-     eval_results = {}
-     for model_result_filepath in model_result_filepaths:
-         eval_result = EvalResult.init_from_json_file(model_result_filepath)
-         eval_name = eval_result.eval_name
-         if eval_name not in eval_results:
-             eval_results[eval_name] = eval_result
-         else:
-             eval_results[eval_name].results.update(eval_result.results)
-
-     results = []
-     for v in eval_results.values():
-         try:
-             v.to_dict()  # Test if the dict version is complete
-             results.append(v)
-         except KeyError:
-             continue
-
-     return results
+ import glob
+ import json
+ import math
+ import os
+ from dataclasses import dataclass
+
+ import dateutil
+ import numpy as np
+
+ #from get_model_info import num_params
+ from src.display.formatting import make_clickable_model
+ from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, FewShotType
+ from src.submission.check_validity import is_model_on_hub
+
+
+ @dataclass
+ class EvalResult:
+     """Represents one full evaluation. Built from a combination of the result and request file for a given run.
+     """
+     eval_name: str  # org_model_precision (uid)
+     full_model: str  # org/model (path on hub)
+     org: str
+     model: str
+     revision: str  # commit hash, "" if main
+     results: dict
+     average_CPS: str
+     fewshot: int
+     fewshot_type: FewShotType = FewShotType.Unknown
+     weight_type: WeightType = WeightType.Original  # Original or Adapter
+     architecture: str = "Unknown"
+     license: str = "?"
+     likes: int = 0
+     num_params: int = 0
+     date: str = ""  # submission date of request file
+     still_on_hub: bool = False
+
+     @classmethod
+     def init_from_json_file(self, json_filepath):
+         """Inits the result from the specific model result file"""
+         with open(json_filepath) as fp:
+             data = json.load(fp)
+
+         config = data.get("config")
+
+         average_CPS = f"{data.get('average_CPS'):.2f}"
+
+         num_fewshot = config.get("num_fewshot", 0)  # Default to 0
+         try:
+             num_fewshot = int(num_fewshot)  # Convert to an integer if possible
+         except ValueError:
+             num_fewshot = 0  # If the conversion fails, fall back to 0
+         # Determine the few-shot type (ZS or FS) based on num_fewshot
+         fewshot_type = FewShotType.from_num_fewshot(num_fewshot)
+
+         num_params = int(0)
+         num_params_billion = config.get("num_params_billion")
+         if num_params_billion is not None:
+             num_params = math.ceil(num_params_billion)
+
+         # Get model and org
+         org_and_model = config.get("model_name", config.get("model_args", None))
+         org_and_model = org_and_model.split("/", 1)
+
+         if len(org_and_model) == 1:
+             org = None
+             model = org_and_model[0]
+             #result_key = f"{model}_{precision.value.name}"
+             result_key = f"{model}_{num_fewshot}"
+         else:
+             org = org_and_model[0]
+             model = org_and_model[1]
+             #result_key = f"{org}_{model}_{precision.value.name}"
+             result_key = f"{org}_{model}_{num_fewshot}"
+         full_model = "/".join(org_and_model)
+
+         still_on_hub, _, model_config = is_model_on_hub(
+             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+         )
+         architecture = "?"
+         if model_config is not None:
+             architectures = getattr(model_config, "architectures", None)
+             if architectures:
+                 architecture = ";".join(architectures)
+
+         # Extract results available in this file (some results are split in several files)
+         results = {}
+         for task in Tasks:
+             task = task.value
+
+             for k, v in data["tasks"].items():
+                 if task.benchmark[:-2] == k:
+                     if "Best Prompt Id" in task.col_name:
+                         results[task.benchmark] = int(v[task.metric_type][-1:])
+                     else:
+                         results[task.benchmark] = f"{v[task.metric_type]:.2f}"  # Ensure two decimals for display
+
+         return self(
+             eval_name=result_key,
+             full_model=full_model,
+             org=org,
+             model=model,
+             results=results,
+             average_CPS=average_CPS,
+             fewshot_type=fewshot_type,
+             fewshot=num_fewshot,
+             revision=config.get("model_sha", ""),
+             still_on_hub=still_on_hub,
+             architecture=architecture,
+             num_params=num_params
+         )
+
+     '''
+     def update_with_request_file(self, requests_path):
+         """Finds the relevant request file for the current model and updates info with it"""
+         request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+
+         try:
+             with open(request_file, "r") as f:
+                 request = json.load(f)
+             self.model_type = ModelType.from_str(request.get("model_type", ""))
+             self.weight_type = WeightType[request.get("weight_type", "Original")]
+             self.license = request.get("license", "?")
+             self.likes = request.get("likes", 0)
+             self.num_params = request.get("params", 0)
+             self.date = request.get("submitted_time", "")
+         except Exception:
+             print(f"Could not find request file for {self.org}/{self.model} with precision
+     '''
+
+     def to_dict(self):
+         """Converts the Eval Result to a dict compatible with our dataframe display"""
+         average = self.average_CPS
+
+         fewshot_type_symbol = (
+             self.fewshot_type.value.symbol if isinstance(self.fewshot_type, FewShotType) else "❓"
+         )
+
+         data_dict = {
+             "eval_name": self.eval_name,  # not a column, just a save name,
+             #AutoEvalColumn.precision.name: self.precision.value.name,
+             #AutoEvalColumn.model_type.name: self.model_type.value.name,
+             #AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+             #AutoEvalColumn.model_type.name: self.model_type.value.name if self.model_type else "Unknown",
+             #AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol if self.model_type else "Unknown",
+             AutoEvalColumn.fewshot_type.name: fewshot_type_symbol,  # Correct symbol for the few-shot type
+             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+             AutoEvalColumn.architecture.name: self.architecture,
+             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+             AutoEvalColumn.revision.name: self.revision,
+             AutoEvalColumn.average.name: average,
+             #AutoEvalColumn.fewshot.name: fewshot,
+             AutoEvalColumn.license.name: self.license,
+             AutoEvalColumn.likes.name: self.likes,
+             AutoEvalColumn.params.name: self.num_params,
+             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+         }
+
+         for task in Tasks:
+             data_dict[task.value.col_name] = self.results[task.value.benchmark]
+
+         return data_dict
+
+
+ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+     """From the path of the results folder root, extract all needed info for results"""
+     model_result_filepaths = []
+
+     for root, _, files in os.walk(results_path):
+         # We should only have json files in model results
+         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
+             continue
+
+         # Sort the files by date
+         try:
+             files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+         except dateutil.parser._parser.ParserError:
+             files = [files[-1]]
+
+         for file in files:
+             model_result_filepaths.append(os.path.join(root, file))
+
+     eval_results = {}
+     for model_result_filepath in model_result_filepaths:
+         # Creation of result
+         eval_result = EvalResult.init_from_json_file(model_result_filepath)
+         #eval_result.update_with_request_file(requests_path)
+
+         # Store results of same eval together
+         eval_name = eval_result.eval_name
+         if eval_name in eval_results.keys():
+             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+         else:
+             eval_results[eval_name] = eval_result
+
+     results = []
+     for v in eval_results.values():
+         try:
+             v.to_dict()  # we test if the dict version is complete
+             results.append(v)
+         except KeyError:  # not all eval values present
+             continue
+
+     return results
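
For context, the shape of the result file that `init_from_json_file` expects can be read off the accessors above (`config.model_name`, `config.model_sha`, `config.num_fewshot`, `config.num_params_billion`, plus top-level `average_CPS` and `tasks`). The snippet below is a minimal sketch of building and loading such a file, assuming the repository modules are importable; the model name, task key, metric name, and file name are placeholders, and `is_model_on_hub` will contact the Hugging Face Hub when it runs.

```python
import json
import tempfile
from pathlib import Path

from src.leaderboard.read_evals import EvalResult

# Hypothetical result payload: the keys mirror what init_from_json_file reads
# ("config", "average_CPS", "tasks"); the model name, task key, and metric name
# are placeholders, not the leaderboard's real identifiers.
payload = {
    "config": {
        "model_name": "my-org/my-model",   # split into org / model
        "model_sha": "main",               # revision checked via is_model_on_hub
        "num_fewshot": "5",                # parsed with int(), falls back to 0
        "num_params_billion": 6.74,        # rounded up with math.ceil
    },
    "average_CPS": 41.237,                 # formatted to two decimals
    "tasks": {
        "example-task": {"example_metric": 55.5},   # placeholder entry
    },
}

with tempfile.TemporaryDirectory() as tmp:
    result_file = Path(tmp) / "results_example.json"
    result_file.write_text(json.dumps(payload))
    entry = EvalResult.init_from_json_file(result_file)   # contacts the Hub to check the model
    print(entry.eval_name, entry.average_CPS, entry.fewshot)
```

Tasks whose benchmark key does not match an entry under `tasks` simply never appear in `results`, which is why `get_raw_eval_results` later calls `to_dict()` as a completeness check before keeping an entry.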
src/leaderboard/read_evals_old.py DELETED
@@ -1,296 +0,0 @@
- import glob
- import json
- import math
- import os
- from dataclasses import dataclass
-
- import dateutil
- import numpy as np
-
- #from get_model_info import num_params
- from src.display.formatting import make_clickable_model
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, FewShotType
- from src.submission.check_validity import is_model_on_hub
-
-
- @dataclass
- class EvalResult:
-     """Represents one full evaluation. Built from a combination of the result and request file for a given run.
-     """
-     eval_name: str  # org_model_precision (uid)
-     full_model: str  # org/model (path on hub)
-     org: str
-     model: str
-     revision: str  # commit hash, "" if main
-     results: dict
-     average_CPS: str
-     fewshot: int
-     #fewshot_type: str
-     #precision: Precision = Precision.Unknown
-     #model_type: ModelType = ModelType.Unknown  # Pretrained, fine tuned, ...
-     fewshot_type: FewShotType = FewShotType.Unknown
-     weight_type: WeightType = WeightType.Original  # Original or Adapter
-     architecture: str = "Unknown"
-     license: str = "?"
-     likes: int = 0
-     num_params: int = 0
-     date: str = ""  # submission date of request file
-     still_on_hub: bool = False
-
-     @classmethod
-     def init_from_json_file(self, json_filepath):
-         """Inits the result from the specific model result file"""
-         with open(json_filepath) as fp:
-             data = json.load(fp)
-
-         config = data.get("config")
-
-         average_CPS = f"{data.get('average_CPS'):.2f}"
-
-         num_fewshot = config.get("num_fewshot", 0)  # Default to 0
-         try:
-             num_fewshot = int(num_fewshot)  # Convert to an integer if possible
-         except ValueError:
-             num_fewshot = 0  # If the conversion fails, fall back to 0
-
-         # Determine the few-shot type (ZS or FS) based on num_fewshot
-         fewshot_type = FewShotType.from_num_fewshot(num_fewshot)
-
-         #precision = config.get("precision")
-
-         #print(precision)
-
-         #print(config, num_fewshot)
-
-         # Precision
-         #precision = Precision.from_str(config.get("model_dtype"))
-
-         model_type = config.get("model_type")
-         # Change: convert model_type into an Enum object (if it is an Enum)
-         model_type = ModelType.from_str(model_type) if model_type else None
-
-         #print("=====================", model_type, config.get("model_name"))
-
-         # Initialize num_params with a default value (e.g., 0)
-         num_params = int(0)
-         # Check whether "num_params_billion" exists in config and is not null
-         num_params_billion = config.get("num_params_billion")
-         if num_params_billion is not None:
-             num_params = math.ceil(num_params_billion)
-
-         print("^^^^^^^^^^^^^^^^^^^^^^^^^", num_params, config.get("num_params_billion"))
-
-         # Get model and org
-         org_and_model = config.get("model_name", config.get("model_args", None))
-         org_and_model = org_and_model.split("/", 1)
-
-         #print(precision.value.name)
-
-         if len(org_and_model) == 1:
-             org = None
-             model = org_and_model[0]
-             #result_key = f"{model}_{precision.value.name}"
-             result_key = f"{model}_{num_fewshot}"
-         else:
-             org = org_and_model[0]
-             model = org_and_model[1]
-             #result_key = f"{org}_{model}_{precision.value.name}"
-             result_key = f"{org}_{model}_{num_fewshot}"
-         full_model = "/".join(org_and_model)
-
-         still_on_hub, _, model_config = is_model_on_hub(
-             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-         )
-         architecture = "?"
-         if model_config is not None:
-             architectures = getattr(model_config, "architectures", None)
-             if architectures:
-                 architecture = ";".join(architectures)
-
-         # Extract results available in this file (some results are split in several files)
-         results = {}
-         for task in Tasks:
-             task = task.value
-
-             '''
-             # We average all scores of a given metric (not all metrics are present in all files)
-             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
-             if accs.size == 0 or any([acc is None for acc in accs]):
-                 continue
-
-             mean_acc = np.mean(accs) * 100.0
-             results[task.benchmark] = mean_acc
-             '''
-
-             for k, v in data["tasks"].items():
-                 #if task.benchmark == k:
-                 if task.benchmark[:-2] == k:
-                     # print(k, "==================", v)
-                     # results[task.benchmark] = v[task.cps]
-
-                     #print(task.benchmark, v[task.metric])
-
-                     if "Best Prompt Id" in task.col_name:
-                         results[task.benchmark] = int(v[task.metric_type][-1:])
-                         #print(results[task.benchmark],v[task.metric_type][-1:])
-                     else:
-                         #results[task.benchmark] = round(v[task.metric_type], 2)
-                         # Format the value to 2 decimal places (ensure it's always shown as xx.xx)
-                         results[task.benchmark] = f"{v[task.metric_type]:.2f}"  # Ensure two decimals for display
-
-                     #results[task.benchmark + "_" + task.metric] = 1.0
-
-                     #results[task.benchmark] = v[task.accuracy]
-                     # print("======", results[task.benchmark])
-                     #results[task.benchmark] = 1.0
-
-         return self(
-             eval_name=result_key,
-             full_model=full_model,
-             org=org,
-             model=model,
-             results=results,
-             average_CPS=average_CPS,
-             fewshot_type=fewshot_type,  # Set the fewshot type (ZS or FS)
-             fewshot=num_fewshot,
-             #model_type=model_type,
-             #precision=precision,
-             revision=config.get("model_sha", ""),
-             still_on_hub=still_on_hub,
-             architecture=architecture,
-             num_params=num_params
-         )
-
-     '''
-     def update_with_request_file(self, requests_path):
-         """Finds the relevant request file for the current model and updates info with it"""
-         request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-
-         try:
-             with open(request_file, "r") as f:
-                 request = json.load(f)
-             self.model_type = ModelType.from_str(request.get("model_type", ""))
-             self.weight_type = WeightType[request.get("weight_type", "Original")]
-             self.license = request.get("license", "?")
-             self.likes = request.get("likes", 0)
-             self.num_params = request.get("params", 0)
-             self.date = request.get("submitted_time", "")
-         except Exception:
-             print(f"Could not find request file for {self.org}/{self.model} with precision
-     '''
-
-     def to_dict(self):
-         """Converts the Eval Result to a dict compatible with our dataframe display"""
-         #average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
-         average = self.average_CPS
-         fewshot = self.fewshot
-
-         # Get the FewShotType symbol, in the same way as ModelType
-         fewshot_type_symbol = (
-             self.fewshot_type.value.symbol if isinstance(self.fewshot_type, FewShotType) else "❓"
-         )
-
-         #("?????", fewshot)
-         data_dict = {
-             "eval_name": self.eval_name,  # not a column, just a save name,
-             #AutoEvalColumn.precision.name: self.precision.value.name,
-             #AutoEvalColumn.model_type.name: self.model_type.value.name,
-             #AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-             #AutoEvalColumn.model_type.name: self.model_type.value.name if self.model_type else "Unknown",
-             #AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol if self.model_type else "Unknown",
-             AutoEvalColumn.fewshot_type.name: fewshot_type_symbol,  # Correct symbol for the few-shot type
-             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
-             AutoEvalColumn.architecture.name: self.architecture,
-             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-             AutoEvalColumn.revision.name: self.revision,
-             AutoEvalColumn.average.name: average,
-             #AutoEvalColumn.fewshot.name: fewshot,
-             AutoEvalColumn.license.name: self.license,
-             AutoEvalColumn.likes.name: self.likes,
-             AutoEvalColumn.params.name: self.num_params,
-             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
-         }
-
-         for task in Tasks:
-             data_dict[task.value.col_name] = self.results[task.value.benchmark]
-
-         return data_dict
-
- '''
- def get_request_file_for_model(requests_path, model_name, precision):
-     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-     request_files = os.path.join(
-         requests_path,
-         f"{model_name}_eval_request_*.json",
-     )
-     request_files = glob.glob(request_files)
-
-     # Select correct request file (precision)
-     request_file = ""
-     request_files = sorted(request_files, reverse=True)
-     for tmp_request_file in request_files:
-         with open(tmp_request_file, "r") as f:
-             req_content = json.load(f)
-             if (
-                 req_content["status"] in ["FINISHED"]
-                 and req_content["precision"] == precision.split(".")[-1]
-             ):
-                 request_file = tmp_request_file
-     return request_file
- '''
-
-
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
-     """From the path of the results folder root, extract all needed info for results"""
-     model_result_filepaths = []
-
-     for root, _, files in os.walk(results_path):
-         # We should only have json files in model results
-         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
-             continue
-
-         # Sort the files by date
-         try:
-             files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
-         except dateutil.parser._parser.ParserError:
-             files = [files[-1]]
-
-         for file in files:
-             model_result_filepaths.append(os.path.join(root, file))
-
-     eval_results = {}
-     for model_result_filepath in model_result_filepaths:
-         # Creation of result
-         eval_result = EvalResult.init_from_json_file(model_result_filepath)
-         #eval_result.update_with_request_file(requests_path)
-
-         # Store results of same eval together
-         eval_name = eval_result.eval_name
-         if eval_name in eval_results.keys():
-             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
-         else:
-             eval_results[eval_name] = eval_result
-
-     results = []
-     for v in eval_results.values():
-         try:
-             v.to_dict()  # we test if the dict version is complete
-             results.append(v)
-         except KeyError:  # not all eval values present
-             continue
-
-     return results
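
As a usage note, here is a minimal sketch of how the kept `read_evals.py` could feed a leaderboard table, assuming pandas is installed and the repository modules are importable; the two folder paths below are placeholders, not the project's actual directories.

```python
import pandas as pd

from src.leaderboard.read_evals import get_raw_eval_results

# Placeholder paths for the results and requests folders.
raw_results = get_raw_eval_results("eval-results", "eval-queue")
rows = [result.to_dict() for result in raw_results]   # one row per evaluated model
leaderboard_df = pd.DataFrame.from_records(rows)
print(leaderboard_df.head())
```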