eval pretrain-core-3

```bash
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain_core_model_3.yaml
```

```bash
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True time litgpt evaluate --tasks 'leaderboard' --out_dir '../evaluate/pretrain-core-3/leaderboard/' --batch_size '4' --dtype 'bfloat16' '../out/pretrain-core-3/final'
```
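
To post-process the scores programmatically instead of copying them from the table below, the metrics can also be read back from the JSON the harness saves under `--out_dir`. This is only a minimal sketch: it assumes `litgpt evaluate` (which wraps lm-eval-harness) leaves a `results*.json` file somewhere under `../evaluate/pretrain-core-3/leaderboard/` with the usual `"results"` / `"acc_norm,none"` key layout; the exact file name and keys may differ between versions.

```python
# Minimal sketch: print the saved leaderboard metrics.
# Assumption: litgpt evaluate (via lm-eval-harness) writes a results*.json
# under the --out_dir used above; adjust the glob if your version differs.
import glob
import json

out_dir = "../evaluate/pretrain-core-3/leaderboard/"

candidates = sorted(glob.glob(out_dir + "**/results*.json", recursive=True))
if not candidates:
    raise SystemExit(f"no results JSON found under {out_dir}")

with open(candidates[-1]) as f:
    data = json.load(f)

# lm-eval-harness keeps per-task metrics under the "results" key,
# e.g. data["results"]["leaderboard_bbh_navigate"]["acc_norm,none"].
for task, metrics in sorted(data.get("results", {}).items()):
    for name, value in metrics.items():
        if isinstance(value, (int, float)):
            print(f"{task:60s} {name:28s} {value:.4f}")
```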

```
| Tasks |Version|Filter|n-shot| Metric | |Value | |Stderr|
|-----------------------------------------------------------|-------|------|-----:|-----------------------|---|-----:|---|------|
|leaderboard | N/A| | | | | | | |
| - leaderboard_bbh | N/A| | | | | | | |
| - leaderboard_bbh_boolean_expressions | 1|none | 3|acc_norm |↑ |0.4680|± |0.0316|
| - leaderboard_bbh_causal_judgement | 1|none | 3|acc_norm |↑ |0.5187|± |0.0366|
| - leaderboard_bbh_date_understanding | 1|none | 3|acc_norm |↑ |0.2080|± |0.0257|
| - leaderboard_bbh_disambiguation_qa | 1|none | 3|acc_norm |↑ |0.3760|± |0.0307|
| - leaderboard_bbh_formal_fallacies | 1|none | 3|acc_norm |↑ |0.5320|± |0.0316|
| - leaderboard_bbh_geometric_shapes | 1|none | 3|acc_norm |↑ |0.1160|± |0.0203|
| - leaderboard_bbh_hyperbaton | 1|none | 3|acc_norm |↑ |0.5160|± |0.0317|
| - leaderboard_bbh_logical_deduction_five_objects | 1|none | 3|acc_norm |↑ |0.2000|± |0.0253|
| - leaderboard_bbh_logical_deduction_seven_objects | 1|none | 3|acc_norm |↑ |0.1280|± |0.0212|
| - leaderboard_bbh_logical_deduction_three_objects | 1|none | 3|acc_norm |↑ |0.3440|± |0.0301|
| - leaderboard_bbh_movie_recommendation | 1|none | 3|acc_norm |↑ |0.2400|± |0.0271|
| - leaderboard_bbh_navigate | 1|none | 3|acc_norm |↑ |0.4200|± |0.0313|
| - leaderboard_bbh_object_counting | 1|none | 3|acc_norm |↑ |0.0560|± |0.0146|
| - leaderboard_bbh_penguins_in_a_table | 1|none | 3|acc_norm |↑ |0.2260|± |0.0347|
| - leaderboard_bbh_reasoning_about_colored_objects | 1|none | 3|acc_norm |↑ |0.1520|± |0.0228|
| - leaderboard_bbh_ruin_names | 1|none | 3|acc_norm |↑ |0.2080|± |0.0257|
| - leaderboard_bbh_salient_translation_error_detection | 1|none | 3|acc_norm |↑ |0.2240|± |0.0264|
| - leaderboard_bbh_snarks | 1|none | 3|acc_norm |↑ |0.4831|± |0.0376|
| - leaderboard_bbh_sports_understanding | 1|none | 3|acc_norm |↑ |0.4640|± |0.0316|
| - leaderboard_bbh_temporal_sequences | 1|none | 3|acc_norm |↑ |0.2520|± |0.0275|
| - leaderboard_bbh_tracking_shuffled_objects_five_objects | 1|none | 3|acc_norm |↑ |0.1720|± |0.0239|
| - leaderboard_bbh_tracking_shuffled_objects_seven_objects| 1|none | 3|acc_norm |↑ |0.1480|± |0.0225|
| - leaderboard_bbh_tracking_shuffled_objects_three_objects| 1|none | 3|acc_norm |↑ |0.3320|± |0.0298|
| - leaderboard_bbh_web_of_lies | 1|none | 3|acc_norm |↑ |0.4880|± |0.0317|
| - leaderboard_gpqa | N/A| | | | | | | |
| - leaderboard_gpqa_diamond | 1|none | 0|acc_norm |↑ |0.2071|± |0.0289|
| - leaderboard_gpqa_extended | 1|none | 0|acc_norm |↑ |0.2619|± |0.0188|
| - leaderboard_gpqa_main | 1|none | 0|acc_norm |↑ |0.2545|± |0.0206|
| - leaderboard_ifeval | 3|none | 0|inst_level_loose_acc |↑ |0.2710|± | N/A|
| | |none | 0|inst_level_strict_acc |↑ |0.2626|± | N/A|
| | |none | 0|prompt_level_loose_acc |↑ |0.1165|± |0.0138|
| | |none | 0|prompt_level_strict_acc|↑ |0.1128|± |0.0136|
| - leaderboard_math_hard | N/A| | | | | | | |
| - leaderboard_math_algebra_hard | 2|none | 4|exact_match |↑ |0.0194|± |0.0040|
| - leaderboard_math_counting_and_prob_hard | 2|none | 4|exact_match |↑ |0.0148|± |0.0055|
| - leaderboard_math_geometry_hard | 2|none | 4|exact_match |↑ |0.0042|± |0.0029|
| - leaderboard_math_intermediate_algebra_hard | 2|none | 4|exact_match |↑ |0.0111|± |0.0035|
| - leaderboard_math_num_theory_hard | 2|none | 4|exact_match |↑ |0.0056|± |0.0032|
| - leaderboard_math_prealgebra_hard | 2|none | 4|exact_match |↑ |0.0161|± |0.0043|
| - leaderboard_math_precalculus_hard | 2|none | 4|exact_match |↑ |0.0092|± |0.0041|
| - leaderboard_mmlu_pro | 0.1|none | 5|acc |↑ |0.1184|± |0.0029|
| - leaderboard_musr | N/A| | | | | | | |
| - leaderboard_musr_murder_mysteries | 1|none | 0|acc_norm |↑ |0.5240|± |0.0316|
| - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.2344|± |0.0265|
| - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.3000|± |0.0290|
```
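
For a rough sense of the BBH block as a whole, an unweighted mean of the 24 subtask `acc_norm` values above lands around 0.303. This is plain arithmetic over the numbers in the table, not necessarily how the harness or the Open LLM Leaderboard aggregates the group score, so treat it only as a sanity check:

```python
# Unweighted mean of the 24 BBH subtask acc_norm values from the table above.
# Illustrative arithmetic only; the leaderboard aggregates groups differently.
bbh_acc_norm = [
    0.4680, 0.5187, 0.2080, 0.3760, 0.5320, 0.1160, 0.5160, 0.2000,
    0.1280, 0.3440, 0.2400, 0.4200, 0.0560, 0.2260, 0.1520, 0.2080,
    0.2240, 0.4831, 0.4640, 0.2520, 0.1720, 0.1480, 0.3320, 0.4880,
]
print(f"{len(bbh_acc_norm)} subtasks, mean acc_norm = {sum(bbh_acc_norm) / len(bbh_acc_norm):.4f}")
# -> 24 subtasks, mean acc_norm = 0.3030
```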