Farouk committed on
Commit 1b4b4b5 • 1 Parent(s): 9c4a04f

Training in progress, step 8800

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a4f06a49bfdc391f60588e60dc50b17e1a132774226ad27f7a77af18c898d01
+ oid sha256:41bbe8a392b25ffc2919422c6cdf1817811f8a50007bc8663a3b59aa22412094
  size 319977229
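The pointer above records only the LFS object hash and size. To check that a locally downloaded adapter_model.bin matches the new revision, one can hash it directly; a minimal sketch, assuming the file has already been fetched to the working directory (the path is a placeholder, not part of this commit):

```python
# Sketch: verify a downloaded LFS blob against the sha256 oid in the pointer file.
# "adapter_model.bin" below is a placeholder for wherever the file was downloaded.
import hashlib

EXPECTED_OID = "41bbe8a392b25ffc2919422c6cdf1817811f8a50007bc8663a3b59aa22412094"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("adapter_model.bin") == EXPECTED_OID, "hash mismatch"
```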
checkpoint-6200/adapter_model/adapter_model/README.md CHANGED
@@ -125,6 +125,17 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: True
  - bnb_4bit_compute_dtype: bfloat16

+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -148,5 +159,6 @@ The following `bitsandbytes` quantization config was used during training:
  - PEFT 0.4.0
  - PEFT 0.4.0
  - PEFT 0.4.0
+ - PEFT 0.4.0

  - PEFT 0.4.0
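The settings repeated in this README describe a 4-bit NF4 setup with double quantization and bfloat16 compute. A minimal sketch of how such a config is expressed with the `transformers` `BitsAndBytesConfig` API; the base model id is a placeholder, since the underlying model is not named in this diff:

```python
# Sketch of the quantization config documented above (QLoRA-style 4-bit NF4).
# "base-model-id" is a placeholder; the base model is not part of this commit.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True, load_in_8bit: False
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
)

base_model = AutoModelForCausalLM.from_pretrained(
    "base-model-id",
    quantization_config=bnb_config,
    device_map="auto",
)
```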
checkpoint-6200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4ced9e734f3fc12124fc782ca014e6dcf293347365a287b101ef9ffa2589d529
+ oid sha256:9a4f06a49bfdc391f60588e60dc50b17e1a132774226ad27f7a77af18c898d01
  size 319977229
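The weights tracked here are a PEFT adapter saved under checkpoint-6200. A hedged sketch of attaching such an adapter to the quantized base model from the previous example, assuming a local copy of the checkpoint directory laid out as in this diff:

```python
# Sketch: attach the saved PEFT adapter to the quantized base model.
# The local path is an assumption mirroring the checkpoint layout shown in this diff.
from peft import PeftModel

model = PeftModel.from_pretrained(
    base_model,  # quantized model from the previous sketch
    "experts/expert-16/checkpoint-6200/adapter_model",  # local adapter directory (assumed)
)
model.eval()
```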
{checkpoint-6800 → checkpoint-8800}/README.md RENAMED
File without changes
{checkpoint-6800 → checkpoint-8800}/adapter_config.json RENAMED
File without changes
{checkpoint-6800 → checkpoint-8800}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c675a65b503e840f932e3f288b4376af802192b8e5716a5c912624986d493663
+ oid sha256:41bbe8a392b25ffc2919422c6cdf1817811f8a50007bc8663a3b59aa22412094
  size 319977229
{checkpoint-6800 → checkpoint-8800}/added_tokens.json RENAMED
File without changes
{checkpoint-6800 → checkpoint-8800}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bcfe5c0dc981023dcc95e882a56f5d526c81ec3b6dff58bcb2b4ae6044cb2421
+ oid sha256:dbdf2568c0db3d4de9fa4854ac46b58d8c6555f3c21b9a5f53bf67582a0bf52c
  size 1279539973
{checkpoint-6800 → checkpoint-8800}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aaab1c5969cc1815061209603b2ae34c3218f1c4cc09d08b060fdcc99de82d46
+ oid sha256:02bd4bd99fac2f29a37cb3cc9a750bb54a25ae7fc2c05a087715695d7c16f290
  size 14511
{checkpoint-6800 → checkpoint-8800}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:97c7e7304629a0d5b32d77774ba8769b4783d1dab8e735fe47e12df568c740c2
+ oid sha256:6c21e5e690d8e0cade5418c2fcaefb2d060d01ccdb64d9d6411a6d2dbbe882b0
  size 627
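optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json are the pieces the Hugging Face Trainer uses to resume this run at step 8800. A minimal sketch, assuming a Trainer has already been constructed with the same model, data, and arguments as the original run:

```python
# Sketch: resume the run from the renamed checkpoint-8800 directory.
# `trainer` is assumed to be a transformers.Trainer built like the original run;
# the path is a placeholder for a local copy of the checkpoint.
trainer.train(resume_from_checkpoint="checkpoint-8800")
```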
{checkpoint-6800 β†’ checkpoint-8800}/special_tokens_map.json RENAMED
File without changes
{checkpoint-6800 β†’ checkpoint-8800}/tokenizer.model RENAMED
File without changes
{checkpoint-6800 β†’ checkpoint-8800}/tokenizer_config.json RENAMED
File without changes
{checkpoint-6800 → checkpoint-8800}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
1
  {
2
  "best_metric": 0.7293602228164673,
3
  "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
4
- "epoch": 2.1546261089987326,
5
- "global_step": 6800,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
@@ -6500,11 +6500,1921 @@
  "mmlu_eval_accuracy_world_religions": 0.631578947368421,
  "mmlu_loss": 1.295992794337223,
  "step": 6800
  }
  ],
  "max_steps": 10000,
  "num_train_epochs": 4,
- "total_flos": 2.0637949787377336e+18,
  "trial_name": null,
  "trial_params": null
  }
 
  {
  "best_metric": 0.7293602228164673,
  "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
+ "epoch": 2.788339670468948,
+ "global_step": 8800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
 
  "mmlu_eval_accuracy_world_religions": 0.631578947368421,
  "mmlu_loss": 1.295992794337223,
  "step": 6800
6503
+ },
6504
+ {
6505
+ "epoch": 2.16,
6506
+ "learning_rate": 0.0002,
6507
+ "loss": 0.5435,
6508
+ "step": 6810
6509
+ },
6510
+ {
6511
+ "epoch": 2.16,
6512
+ "learning_rate": 0.0002,
6513
+ "loss": 0.593,
6514
+ "step": 6820
6515
+ },
6516
+ {
6517
+ "epoch": 2.16,
6518
+ "learning_rate": 0.0002,
6519
+ "loss": 0.5898,
6520
+ "step": 6830
6521
+ },
6522
+ {
6523
+ "epoch": 2.17,
6524
+ "learning_rate": 0.0002,
6525
+ "loss": 0.5404,
6526
+ "step": 6840
6527
+ },
6528
+ {
6529
+ "epoch": 2.17,
6530
+ "learning_rate": 0.0002,
6531
+ "loss": 0.593,
6532
+ "step": 6850
6533
+ },
6534
+ {
6535
+ "epoch": 2.17,
6536
+ "learning_rate": 0.0002,
6537
+ "loss": 0.5832,
6538
+ "step": 6860
6539
+ },
6540
+ {
6541
+ "epoch": 2.18,
6542
+ "learning_rate": 0.0002,
6543
+ "loss": 0.6201,
6544
+ "step": 6870
6545
+ },
6546
+ {
6547
+ "epoch": 2.18,
6548
+ "learning_rate": 0.0002,
6549
+ "loss": 0.6147,
6550
+ "step": 6880
6551
+ },
6552
+ {
6553
+ "epoch": 2.18,
6554
+ "learning_rate": 0.0002,
6555
+ "loss": 0.6102,
6556
+ "step": 6890
6557
+ },
6558
+ {
6559
+ "epoch": 2.19,
6560
+ "learning_rate": 0.0002,
6561
+ "loss": 0.5885,
6562
+ "step": 6900
6563
+ },
6564
+ {
6565
+ "epoch": 2.19,
6566
+ "learning_rate": 0.0002,
6567
+ "loss": 0.5549,
6568
+ "step": 6910
6569
+ },
6570
+ {
6571
+ "epoch": 2.19,
6572
+ "learning_rate": 0.0002,
6573
+ "loss": 0.5973,
6574
+ "step": 6920
6575
+ },
6576
+ {
6577
+ "epoch": 2.2,
6578
+ "learning_rate": 0.0002,
6579
+ "loss": 0.589,
6580
+ "step": 6930
6581
+ },
6582
+ {
6583
+ "epoch": 2.2,
6584
+ "learning_rate": 0.0002,
6585
+ "loss": 0.6258,
6586
+ "step": 6940
6587
+ },
6588
+ {
6589
+ "epoch": 2.2,
6590
+ "learning_rate": 0.0002,
6591
+ "loss": 0.6038,
6592
+ "step": 6950
6593
+ },
6594
+ {
6595
+ "epoch": 2.21,
6596
+ "learning_rate": 0.0002,
6597
+ "loss": 0.5865,
6598
+ "step": 6960
6599
+ },
6600
+ {
6601
+ "epoch": 2.21,
6602
+ "learning_rate": 0.0002,
6603
+ "loss": 0.6355,
6604
+ "step": 6970
6605
+ },
6606
+ {
6607
+ "epoch": 2.21,
6608
+ "learning_rate": 0.0002,
6609
+ "loss": 0.6572,
6610
+ "step": 6980
6611
+ },
6612
+ {
6613
+ "epoch": 2.21,
6614
+ "learning_rate": 0.0002,
6615
+ "loss": 0.5367,
6616
+ "step": 6990
6617
+ },
6618
+ {
6619
+ "epoch": 2.22,
6620
+ "learning_rate": 0.0002,
6621
+ "loss": 0.5959,
6622
+ "step": 7000
6623
+ },
6624
+ {
6625
+ "epoch": 2.22,
6626
+ "eval_loss": 0.7645158767700195,
6627
+ "eval_runtime": 111.037,
6628
+ "eval_samples_per_second": 9.006,
6629
+ "eval_steps_per_second": 4.503,
6630
+ "step": 7000
6631
+ },
6632
+ {
6633
+ "epoch": 2.22,
6634
+ "mmlu_eval_accuracy": 0.478166482161635,
6635
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6636
+ "mmlu_eval_accuracy_anatomy": 0.5,
6637
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6638
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6639
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6640
+ "mmlu_eval_accuracy_college_biology": 0.375,
6641
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6642
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6643
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
6644
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6645
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6646
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
6647
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
6648
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6649
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6650
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6651
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6652
+ "mmlu_eval_accuracy_global_facts": 0.5,
6653
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6654
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6655
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6656
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6657
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6658
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6659
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6660
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
6661
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
6662
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
6663
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6664
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
6665
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6666
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6667
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
6668
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6669
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6670
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6671
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6672
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
6673
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6674
+ "mmlu_eval_accuracy_marketing": 0.84,
6675
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6676
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6677
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6678
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
6679
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6680
+ "mmlu_eval_accuracy_philosophy": 0.5,
6681
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
6682
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6683
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
6684
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
6685
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
6686
+ "mmlu_eval_accuracy_public_relations": 0.5,
6687
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
6688
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6689
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6690
+ "mmlu_eval_accuracy_virology": 0.5,
6691
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
6692
+ "mmlu_loss": 1.506881151358079,
6693
+ "step": 7000
6694
+ },
6695
+ {
6696
+ "epoch": 2.22,
6697
+ "learning_rate": 0.0002,
6698
+ "loss": 0.6429,
6699
+ "step": 7010
6700
+ },
6701
+ {
6702
+ "epoch": 2.22,
6703
+ "learning_rate": 0.0002,
6704
+ "loss": 0.5899,
6705
+ "step": 7020
6706
+ },
6707
+ {
6708
+ "epoch": 2.23,
6709
+ "learning_rate": 0.0002,
6710
+ "loss": 0.5661,
6711
+ "step": 7030
6712
+ },
6713
+ {
6714
+ "epoch": 2.23,
6715
+ "learning_rate": 0.0002,
6716
+ "loss": 0.5747,
6717
+ "step": 7040
6718
+ },
6719
+ {
6720
+ "epoch": 2.23,
6721
+ "learning_rate": 0.0002,
6722
+ "loss": 0.603,
6723
+ "step": 7050
6724
+ },
6725
+ {
6726
+ "epoch": 2.24,
6727
+ "learning_rate": 0.0002,
6728
+ "loss": 0.5864,
6729
+ "step": 7060
6730
+ },
6731
+ {
6732
+ "epoch": 2.24,
6733
+ "learning_rate": 0.0002,
6734
+ "loss": 0.588,
6735
+ "step": 7070
6736
+ },
6737
+ {
6738
+ "epoch": 2.24,
6739
+ "learning_rate": 0.0002,
6740
+ "loss": 0.6275,
6741
+ "step": 7080
6742
+ },
6743
+ {
6744
+ "epoch": 2.25,
6745
+ "learning_rate": 0.0002,
6746
+ "loss": 0.6118,
6747
+ "step": 7090
6748
+ },
6749
+ {
6750
+ "epoch": 2.25,
6751
+ "learning_rate": 0.0002,
6752
+ "loss": 0.6475,
6753
+ "step": 7100
6754
+ },
6755
+ {
6756
+ "epoch": 2.25,
6757
+ "learning_rate": 0.0002,
6758
+ "loss": 0.6191,
6759
+ "step": 7110
6760
+ },
6761
+ {
6762
+ "epoch": 2.26,
6763
+ "learning_rate": 0.0002,
6764
+ "loss": 0.5623,
6765
+ "step": 7120
6766
+ },
6767
+ {
6768
+ "epoch": 2.26,
6769
+ "learning_rate": 0.0002,
6770
+ "loss": 0.6052,
6771
+ "step": 7130
6772
+ },
6773
+ {
6774
+ "epoch": 2.26,
6775
+ "learning_rate": 0.0002,
6776
+ "loss": 0.545,
6777
+ "step": 7140
6778
+ },
6779
+ {
6780
+ "epoch": 2.27,
6781
+ "learning_rate": 0.0002,
6782
+ "loss": 0.5975,
6783
+ "step": 7150
6784
+ },
6785
+ {
6786
+ "epoch": 2.27,
6787
+ "learning_rate": 0.0002,
6788
+ "loss": 0.6022,
6789
+ "step": 7160
6790
+ },
6791
+ {
6792
+ "epoch": 2.27,
6793
+ "learning_rate": 0.0002,
6794
+ "loss": 0.608,
6795
+ "step": 7170
6796
+ },
6797
+ {
6798
+ "epoch": 2.28,
6799
+ "learning_rate": 0.0002,
6800
+ "loss": 0.6401,
6801
+ "step": 7180
6802
+ },
6803
+ {
6804
+ "epoch": 2.28,
6805
+ "learning_rate": 0.0002,
6806
+ "loss": 0.6429,
6807
+ "step": 7190
6808
+ },
6809
+ {
6810
+ "epoch": 2.28,
6811
+ "learning_rate": 0.0002,
6812
+ "loss": 0.5495,
6813
+ "step": 7200
6814
+ },
6815
+ {
6816
+ "epoch": 2.28,
6817
+ "eval_loss": 0.7578040361404419,
6818
+ "eval_runtime": 111.0662,
6819
+ "eval_samples_per_second": 9.004,
6820
+ "eval_steps_per_second": 4.502,
6821
+ "step": 7200
6822
+ },
6823
+ {
6824
+ "epoch": 2.28,
6825
+ "mmlu_eval_accuracy": 0.47051789661643223,
6826
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6827
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6828
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6829
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6830
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6831
+ "mmlu_eval_accuracy_college_biology": 0.375,
6832
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
6833
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6834
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
6835
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6836
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6837
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6838
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6839
+ "mmlu_eval_accuracy_econometrics": 0.25,
6840
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6841
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
6842
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6843
+ "mmlu_eval_accuracy_global_facts": 0.3,
6844
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
6845
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6846
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6847
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6848
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6849
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6850
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6851
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
6852
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6853
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6854
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6855
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
6856
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6857
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6858
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
6859
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
6860
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6861
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6862
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6863
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
6864
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6865
+ "mmlu_eval_accuracy_marketing": 0.76,
6866
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6867
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6868
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6869
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
6870
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6871
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
6872
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
6873
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
6874
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
6875
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
6876
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
6877
+ "mmlu_eval_accuracy_public_relations": 0.5,
6878
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6879
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6880
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6881
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6882
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6883
+ "mmlu_loss": 1.5382918150944747,
6884
+ "step": 7200
6885
+ },
6886
+ {
6887
+ "epoch": 2.28,
6888
+ "learning_rate": 0.0002,
6889
+ "loss": 0.5606,
6890
+ "step": 7210
6891
+ },
6892
+ {
6893
+ "epoch": 2.29,
6894
+ "learning_rate": 0.0002,
6895
+ "loss": 0.5737,
6896
+ "step": 7220
6897
+ },
6898
+ {
6899
+ "epoch": 2.29,
6900
+ "learning_rate": 0.0002,
6901
+ "loss": 0.6112,
6902
+ "step": 7230
6903
+ },
6904
+ {
6905
+ "epoch": 2.29,
6906
+ "learning_rate": 0.0002,
6907
+ "loss": 0.626,
6908
+ "step": 7240
6909
+ },
6910
+ {
6911
+ "epoch": 2.3,
6912
+ "learning_rate": 0.0002,
6913
+ "loss": 0.608,
6914
+ "step": 7250
6915
+ },
6916
+ {
6917
+ "epoch": 2.3,
6918
+ "learning_rate": 0.0002,
6919
+ "loss": 0.6265,
6920
+ "step": 7260
6921
+ },
6922
+ {
6923
+ "epoch": 2.3,
6924
+ "learning_rate": 0.0002,
6925
+ "loss": 0.6053,
6926
+ "step": 7270
6927
+ },
6928
+ {
6929
+ "epoch": 2.31,
6930
+ "learning_rate": 0.0002,
6931
+ "loss": 0.6135,
6932
+ "step": 7280
6933
+ },
6934
+ {
6935
+ "epoch": 2.31,
6936
+ "learning_rate": 0.0002,
6937
+ "loss": 0.5217,
6938
+ "step": 7290
6939
+ },
6940
+ {
6941
+ "epoch": 2.31,
6942
+ "learning_rate": 0.0002,
6943
+ "loss": 0.6124,
6944
+ "step": 7300
6945
+ },
6946
+ {
6947
+ "epoch": 2.32,
6948
+ "learning_rate": 0.0002,
6949
+ "loss": 0.5506,
6950
+ "step": 7310
6951
+ },
6952
+ {
6953
+ "epoch": 2.32,
6954
+ "learning_rate": 0.0002,
6955
+ "loss": 0.6095,
6956
+ "step": 7320
6957
+ },
6958
+ {
6959
+ "epoch": 2.32,
6960
+ "learning_rate": 0.0002,
6961
+ "loss": 0.5972,
6962
+ "step": 7330
6963
+ },
6964
+ {
6965
+ "epoch": 2.33,
6966
+ "learning_rate": 0.0002,
6967
+ "loss": 0.6714,
6968
+ "step": 7340
6969
+ },
6970
+ {
6971
+ "epoch": 2.33,
6972
+ "learning_rate": 0.0002,
6973
+ "loss": 0.6083,
6974
+ "step": 7350
6975
+ },
6976
+ {
6977
+ "epoch": 2.33,
6978
+ "learning_rate": 0.0002,
6979
+ "loss": 0.6033,
6980
+ "step": 7360
6981
+ },
6982
+ {
6983
+ "epoch": 2.34,
6984
+ "learning_rate": 0.0002,
6985
+ "loss": 0.5881,
6986
+ "step": 7370
6987
+ },
6988
+ {
6989
+ "epoch": 2.34,
6990
+ "learning_rate": 0.0002,
6991
+ "loss": 0.5958,
6992
+ "step": 7380
6993
+ },
6994
+ {
6995
+ "epoch": 2.34,
6996
+ "learning_rate": 0.0002,
6997
+ "loss": 0.6009,
6998
+ "step": 7390
6999
+ },
7000
+ {
7001
+ "epoch": 2.34,
7002
+ "learning_rate": 0.0002,
7003
+ "loss": 0.5608,
7004
+ "step": 7400
7005
+ },
7006
+ {
7007
+ "epoch": 2.34,
7008
+ "eval_loss": 0.767185628414154,
7009
+ "eval_runtime": 111.2161,
7010
+ "eval_samples_per_second": 8.992,
7011
+ "eval_steps_per_second": 4.496,
7012
+ "step": 7400
7013
+ },
7014
+ {
7015
+ "epoch": 2.34,
7016
+ "mmlu_eval_accuracy": 0.46046773240416866,
7017
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7018
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7019
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7020
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7021
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7022
+ "mmlu_eval_accuracy_college_biology": 0.375,
7023
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
7024
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7025
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7026
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7027
+ "mmlu_eval_accuracy_college_physics": 0.09090909090909091,
7028
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7029
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7030
+ "mmlu_eval_accuracy_econometrics": 0.25,
7031
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7032
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
7033
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
7034
+ "mmlu_eval_accuracy_global_facts": 0.3,
7035
+ "mmlu_eval_accuracy_high_school_biology": 0.3125,
7036
+ "mmlu_eval_accuracy_high_school_chemistry": 0.13636363636363635,
7037
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7038
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7039
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7040
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7041
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7042
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7043
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7044
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7045
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7046
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7047
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7048
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7049
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
7050
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7051
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7052
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7053
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7054
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
7055
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7056
+ "mmlu_eval_accuracy_marketing": 0.8,
7057
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7058
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
7059
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7060
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7061
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
7062
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
7063
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7064
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
7065
+ "mmlu_eval_accuracy_professional_law": 0.3,
7066
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
7067
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
7068
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7069
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7070
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7071
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7072
+ "mmlu_eval_accuracy_virology": 0.5,
7073
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7074
+ "mmlu_loss": 1.5711101981040392,
7075
+ "step": 7400
7076
+ },
7077
+ {
7078
+ "epoch": 2.35,
7079
+ "learning_rate": 0.0002,
7080
+ "loss": 0.5974,
7081
+ "step": 7410
7082
+ },
7083
+ {
7084
+ "epoch": 2.35,
7085
+ "learning_rate": 0.0002,
7086
+ "loss": 0.5677,
7087
+ "step": 7420
7088
+ },
7089
+ {
7090
+ "epoch": 2.35,
7091
+ "learning_rate": 0.0002,
7092
+ "loss": 0.5592,
7093
+ "step": 7430
7094
+ },
7095
+ {
7096
+ "epoch": 2.36,
7097
+ "learning_rate": 0.0002,
7098
+ "loss": 0.5754,
7099
+ "step": 7440
7100
+ },
7101
+ {
7102
+ "epoch": 2.36,
7103
+ "learning_rate": 0.0002,
7104
+ "loss": 0.6117,
7105
+ "step": 7450
7106
+ },
7107
+ {
7108
+ "epoch": 2.36,
7109
+ "learning_rate": 0.0002,
7110
+ "loss": 0.5462,
7111
+ "step": 7460
7112
+ },
7113
+ {
7114
+ "epoch": 2.37,
7115
+ "learning_rate": 0.0002,
7116
+ "loss": 0.5888,
7117
+ "step": 7470
7118
+ },
7119
+ {
7120
+ "epoch": 2.37,
7121
+ "learning_rate": 0.0002,
7122
+ "loss": 0.5933,
7123
+ "step": 7480
7124
+ },
7125
+ {
7126
+ "epoch": 2.37,
7127
+ "learning_rate": 0.0002,
7128
+ "loss": 0.6329,
7129
+ "step": 7490
7130
+ },
7131
+ {
7132
+ "epoch": 2.38,
7133
+ "learning_rate": 0.0002,
7134
+ "loss": 0.6803,
7135
+ "step": 7500
7136
+ },
7137
+ {
7138
+ "epoch": 2.38,
7139
+ "learning_rate": 0.0002,
7140
+ "loss": 0.5907,
7141
+ "step": 7510
7142
+ },
7143
+ {
7144
+ "epoch": 2.38,
7145
+ "learning_rate": 0.0002,
7146
+ "loss": 0.5929,
7147
+ "step": 7520
7148
+ },
7149
+ {
7150
+ "epoch": 2.39,
7151
+ "learning_rate": 0.0002,
7152
+ "loss": 0.6288,
7153
+ "step": 7530
7154
+ },
7155
+ {
7156
+ "epoch": 2.39,
7157
+ "learning_rate": 0.0002,
7158
+ "loss": 0.5839,
7159
+ "step": 7540
7160
+ },
7161
+ {
7162
+ "epoch": 2.39,
7163
+ "learning_rate": 0.0002,
7164
+ "loss": 0.5886,
7165
+ "step": 7550
7166
+ },
7167
+ {
7168
+ "epoch": 2.4,
7169
+ "learning_rate": 0.0002,
7170
+ "loss": 0.6225,
7171
+ "step": 7560
7172
+ },
7173
+ {
7174
+ "epoch": 2.4,
7175
+ "learning_rate": 0.0002,
7176
+ "loss": 0.6009,
7177
+ "step": 7570
7178
+ },
7179
+ {
7180
+ "epoch": 2.4,
7181
+ "learning_rate": 0.0002,
7182
+ "loss": 0.5975,
7183
+ "step": 7580
7184
+ },
7185
+ {
7186
+ "epoch": 2.4,
7187
+ "learning_rate": 0.0002,
7188
+ "loss": 0.5581,
7189
+ "step": 7590
7190
+ },
7191
+ {
7192
+ "epoch": 2.41,
7193
+ "learning_rate": 0.0002,
7194
+ "loss": 0.612,
7195
+ "step": 7600
7196
+ },
7197
+ {
7198
+ "epoch": 2.41,
7199
+ "eval_loss": 0.76031494140625,
7200
+ "eval_runtime": 111.0399,
7201
+ "eval_samples_per_second": 9.006,
7202
+ "eval_steps_per_second": 4.503,
7203
+ "step": 7600
7204
+ },
7205
+ {
7206
+ "epoch": 2.41,
7207
+ "mmlu_eval_accuracy": 0.47951118911559576,
7208
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7209
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7210
+ "mmlu_eval_accuracy_astronomy": 0.5,
7211
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7212
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7213
+ "mmlu_eval_accuracy_college_biology": 0.4375,
7214
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7215
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7216
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7217
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7218
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7219
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7220
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7221
+ "mmlu_eval_accuracy_econometrics": 0.25,
7222
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7223
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7224
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
7225
+ "mmlu_eval_accuracy_global_facts": 0.5,
7226
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7227
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7228
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7229
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7230
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7231
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7232
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7233
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7234
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7235
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7236
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
7237
+ "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
7238
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7239
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7240
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
7241
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7242
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7243
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7244
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7245
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
7246
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7247
+ "mmlu_eval_accuracy_marketing": 0.8,
7248
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7249
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
7250
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
7251
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7252
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7253
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7254
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
7255
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7256
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
7257
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7258
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
7259
+ "mmlu_eval_accuracy_public_relations": 0.5,
7260
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7261
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7262
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7263
+ "mmlu_eval_accuracy_virology": 0.5,
7264
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7265
+ "mmlu_loss": 1.584926052947891,
7266
+ "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 2.41,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.5914,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 2.41,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.59,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 2.42,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.6179,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 2.42,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.6203,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 2.42,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.6113,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 2.43,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 0.5505,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 2.43,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.5664,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 2.43,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.596,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 2.44,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.6125,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 2.44,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.607,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 2.44,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.5657,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 2.45,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.5419,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 2.45,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.614,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 2.45,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.6107,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 2.46,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.6099,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 2.46,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.5994,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 2.46,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.6274,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 2.47,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.5902,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 2.47,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.5902,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 2.47,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.599,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 2.47,
7390
+ "eval_loss": 0.760485827922821,
7391
+ "eval_runtime": 111.1916,
7392
+ "eval_samples_per_second": 8.993,
7393
+ "eval_steps_per_second": 4.497,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 2.47,
7398
+ "mmlu_eval_accuracy": 0.48418694277386404,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7400
+ "mmlu_eval_accuracy_anatomy": 0.5,
7401
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7404
+ "mmlu_eval_accuracy_college_biology": 0.375,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.5,
7409
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7410
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7412
+ "mmlu_eval_accuracy_econometrics": 0.25,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7416
+ "mmlu_eval_accuracy_global_facts": 0.5,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7431
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7433
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7437
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7438
+ "mmlu_eval_accuracy_marketing": 0.84,
7439
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7443
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7444
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7445
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7447
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
7450
+ "mmlu_eval_accuracy_public_relations": 0.5,
7451
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7452
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7454
+ "mmlu_eval_accuracy_virology": 0.5,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.4828916400204128,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 2.47,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.6005,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 2.48,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.6662,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 2.48,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.5821,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 2.48,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.5826,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 2.49,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.5804,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 2.49,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.587,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 2.49,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.6062,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 2.5,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.5616,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 2.5,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.6351,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 2.5,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.5738,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 2.51,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.5564,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 2.51,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.5696,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 2.51,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.5812,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 2.52,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.5786,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 2.52,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.6053,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 2.52,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.5727,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 2.53,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.621,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 2.53,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.5679,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 2.53,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.6138,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 2.53,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.588,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 2.53,
7581
+ "eval_loss": 0.7585816979408264,
7582
+ "eval_runtime": 111.2835,
7583
+ "eval_samples_per_second": 8.986,
7584
+ "eval_steps_per_second": 4.493,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 2.53,
7589
+ "mmlu_eval_accuracy": 0.48589851563960756,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7591
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7592
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7595
+ "mmlu_eval_accuracy_college_biology": 0.375,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7600
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7601
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7603
+ "mmlu_eval_accuracy_econometrics": 0.25,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7607
+ "mmlu_eval_accuracy_global_facts": 0.5,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7622
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7624
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7628
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7629
+ "mmlu_eval_accuracy_marketing": 0.68,
7630
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7634
+ "mmlu_eval_accuracy_nutrition": 0.48484848484848486,
7635
+ "mmlu_eval_accuracy_philosophy": 0.5,
7636
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
7641
+ "mmlu_eval_accuracy_public_relations": 0.5,
7642
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7643
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7647
+ "mmlu_loss": 1.566373301120402,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 2.54,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.5624,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 2.54,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.6206,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 2.54,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.607,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 2.55,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.6344,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 2.55,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.6705,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 2.55,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.5679,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 2.56,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.6,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 2.56,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.6486,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 2.56,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.5959,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 2.57,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.6454,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 2.57,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.6085,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 2.57,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.5509,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 2.58,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.6267,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 2.58,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.5865,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 2.58,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.6002,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 2.59,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.6342,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 2.59,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.6312,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 2.59,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.6361,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 2.6,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.5676,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 2.6,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.6125,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 2.6,
7772
+ "eval_loss": 0.7568719387054443,
7773
+ "eval_runtime": 111.2374,
7774
+ "eval_samples_per_second": 8.99,
7775
+ "eval_steps_per_second": 4.495,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 2.6,
7780
+ "mmlu_eval_accuracy": 0.4699982014237092,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7782
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7783
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7786
+ "mmlu_eval_accuracy_college_biology": 0.375,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7791
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7792
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7794
+ "mmlu_eval_accuracy_econometrics": 0.25,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7798
+ "mmlu_eval_accuracy_global_facts": 0.3,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7813
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7815
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7819
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7820
+ "mmlu_eval_accuracy_marketing": 0.68,
7821
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7826
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7827
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7829
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7832
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7833
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7834
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7836
+ "mmlu_eval_accuracy_virology": 0.5,
7837
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7838
+ "mmlu_loss": 1.3685555242527248,
7839
+ "step": 8200
7840
+ },
7841
+ {
7842
+ "epoch": 2.6,
7843
+ "learning_rate": 0.0002,
7844
+ "loss": 0.5992,
7845
+ "step": 8210
7846
+ },
7847
+ {
7848
+ "epoch": 2.6,
7849
+ "learning_rate": 0.0002,
7850
+ "loss": 0.6068,
7851
+ "step": 8220
7852
+ },
7853
+ {
7854
+ "epoch": 2.61,
7855
+ "learning_rate": 0.0002,
7856
+ "loss": 0.6986,
7857
+ "step": 8230
7858
+ },
7859
+ {
7860
+ "epoch": 2.61,
7861
+ "learning_rate": 0.0002,
7862
+ "loss": 0.5809,
7863
+ "step": 8240
7864
+ },
7865
+ {
7866
+ "epoch": 2.61,
7867
+ "learning_rate": 0.0002,
7868
+ "loss": 0.6368,
7869
+ "step": 8250
7870
+ },
7871
+ {
7872
+ "epoch": 2.62,
7873
+ "learning_rate": 0.0002,
7874
+ "loss": 0.5731,
7875
+ "step": 8260
7876
+ },
7877
+ {
7878
+ "epoch": 2.62,
7879
+ "learning_rate": 0.0002,
7880
+ "loss": 0.6439,
7881
+ "step": 8270
7882
+ },
7883
+ {
7884
+ "epoch": 2.62,
7885
+ "learning_rate": 0.0002,
7886
+ "loss": 0.5661,
7887
+ "step": 8280
7888
+ },
7889
+ {
7890
+ "epoch": 2.63,
7891
+ "learning_rate": 0.0002,
7892
+ "loss": 0.5816,
7893
+ "step": 8290
7894
+ },
7895
+ {
7896
+ "epoch": 2.63,
7897
+ "learning_rate": 0.0002,
7898
+ "loss": 0.5385,
7899
+ "step": 8300
7900
+ },
7901
+ {
7902
+ "epoch": 2.63,
7903
+ "learning_rate": 0.0002,
7904
+ "loss": 0.5913,
7905
+ "step": 8310
7906
+ },
7907
+ {
7908
+ "epoch": 2.64,
7909
+ "learning_rate": 0.0002,
7910
+ "loss": 0.5817,
7911
+ "step": 8320
7912
+ },
7913
+ {
7914
+ "epoch": 2.64,
7915
+ "learning_rate": 0.0002,
7916
+ "loss": 0.6098,
7917
+ "step": 8330
7918
+ },
7919
+ {
7920
+ "epoch": 2.64,
7921
+ "learning_rate": 0.0002,
7922
+ "loss": 0.558,
7923
+ "step": 8340
7924
+ },
7925
+ {
7926
+ "epoch": 2.65,
7927
+ "learning_rate": 0.0002,
7928
+ "loss": 0.6008,
7929
+ "step": 8350
7930
+ },
7931
+ {
7932
+ "epoch": 2.65,
7933
+ "learning_rate": 0.0002,
7934
+ "loss": 0.5921,
7935
+ "step": 8360
7936
+ },
7937
+ {
7938
+ "epoch": 2.65,
7939
+ "learning_rate": 0.0002,
7940
+ "loss": 0.6194,
7941
+ "step": 8370
7942
+ },
7943
+ {
7944
+ "epoch": 2.66,
7945
+ "learning_rate": 0.0002,
7946
+ "loss": 0.6849,
7947
+ "step": 8380
7948
+ },
7949
+ {
7950
+ "epoch": 2.66,
7951
+ "learning_rate": 0.0002,
7952
+ "loss": 0.5851,
7953
+ "step": 8390
7954
+ },
7955
+ {
7956
+ "epoch": 2.66,
7957
+ "learning_rate": 0.0002,
7958
+ "loss": 0.5574,
7959
+ "step": 8400
7960
+ },
7961
+ {
7962
+ "epoch": 2.66,
7963
+ "eval_loss": 0.7574586868286133,
7964
+ "eval_runtime": 111.0853,
7965
+ "eval_samples_per_second": 9.002,
7966
+ "eval_steps_per_second": 4.501,
7967
+ "step": 8400
7968
+ },
7969
+ {
7970
+ "epoch": 2.66,
7971
+ "mmlu_eval_accuracy": 0.47813110611906134,
7972
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7973
+ "mmlu_eval_accuracy_anatomy": 0.5,
7974
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7975
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7976
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7977
+ "mmlu_eval_accuracy_college_biology": 0.375,
7978
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7979
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7980
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7981
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7982
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7983
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7984
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7985
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
7986
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7987
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
7988
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7989
+ "mmlu_eval_accuracy_global_facts": 0.3,
7990
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7991
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7992
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7993
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7994
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7995
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7996
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7997
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7998
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7999
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
8000
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
8001
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
8002
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8003
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8004
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8005
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8006
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8007
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8008
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8009
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8010
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8011
+ "mmlu_eval_accuracy_marketing": 0.76,
8012
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8013
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8014
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
8015
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
8016
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8017
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
8018
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
8019
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
8020
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
8021
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8022
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
8023
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8024
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
8025
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
8026
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8027
+ "mmlu_eval_accuracy_virology": 0.5,
8028
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8029
+ "mmlu_loss": 1.4071070384574622,
8030
+ "step": 8400
8031
+ },
8032
+ {
8033
+ "epoch": 2.66,
8034
+ "learning_rate": 0.0002,
8035
+ "loss": 0.5795,
8036
+ "step": 8410
8037
+ },
8038
+ {
8039
+ "epoch": 2.67,
8040
+ "learning_rate": 0.0002,
8041
+ "loss": 0.6109,
8042
+ "step": 8420
8043
+ },
8044
+ {
8045
+ "epoch": 2.67,
8046
+ "learning_rate": 0.0002,
8047
+ "loss": 0.6136,
8048
+ "step": 8430
8049
+ },
8050
+ {
8051
+ "epoch": 2.67,
8052
+ "learning_rate": 0.0002,
8053
+ "loss": 0.5795,
8054
+ "step": 8440
8055
+ },
8056
+ {
8057
+ "epoch": 2.68,
8058
+ "learning_rate": 0.0002,
8059
+ "loss": 0.5639,
8060
+ "step": 8450
8061
+ },
8062
+ {
8063
+ "epoch": 2.68,
8064
+ "learning_rate": 0.0002,
8065
+ "loss": 0.5869,
8066
+ "step": 8460
8067
+ },
8068
+ {
8069
+ "epoch": 2.68,
8070
+ "learning_rate": 0.0002,
8071
+ "loss": 0.5946,
8072
+ "step": 8470
8073
+ },
8074
+ {
8075
+ "epoch": 2.69,
8076
+ "learning_rate": 0.0002,
8077
+ "loss": 0.5745,
8078
+ "step": 8480
8079
+ },
8080
+ {
8081
+ "epoch": 2.69,
8082
+ "learning_rate": 0.0002,
8083
+ "loss": 0.573,
8084
+ "step": 8490
8085
+ },
8086
+ {
8087
+ "epoch": 2.69,
8088
+ "learning_rate": 0.0002,
8089
+ "loss": 0.5846,
8090
+ "step": 8500
8091
+ },
8092
+ {
8093
+ "epoch": 2.7,
8094
+ "learning_rate": 0.0002,
8095
+ "loss": 0.6058,
8096
+ "step": 8510
8097
+ },
8098
+ {
8099
+ "epoch": 2.7,
8100
+ "learning_rate": 0.0002,
8101
+ "loss": 0.5072,
8102
+ "step": 8520
8103
+ },
8104
+ {
8105
+ "epoch": 2.7,
8106
+ "learning_rate": 0.0002,
8107
+ "loss": 0.6296,
8108
+ "step": 8530
8109
+ },
8110
+ {
8111
+ "epoch": 2.71,
8112
+ "learning_rate": 0.0002,
8113
+ "loss": 0.6057,
8114
+ "step": 8540
8115
+ },
8116
+ {
8117
+ "epoch": 2.71,
8118
+ "learning_rate": 0.0002,
8119
+ "loss": 0.544,
8120
+ "step": 8550
8121
+ },
8122
+ {
8123
+ "epoch": 2.71,
8124
+ "learning_rate": 0.0002,
8125
+ "loss": 0.6256,
8126
+ "step": 8560
8127
+ },
8128
+ {
8129
+ "epoch": 2.72,
8130
+ "learning_rate": 0.0002,
8131
+ "loss": 0.6307,
8132
+ "step": 8570
8133
+ },
8134
+ {
8135
+ "epoch": 2.72,
8136
+ "learning_rate": 0.0002,
8137
+ "loss": 0.5717,
8138
+ "step": 8580
8139
+ },
8140
+ {
8141
+ "epoch": 2.72,
8142
+ "learning_rate": 0.0002,
8143
+ "loss": 0.5946,
8144
+ "step": 8590
8145
+ },
8146
+ {
8147
+ "epoch": 2.72,
8148
+ "learning_rate": 0.0002,
8149
+ "loss": 0.6025,
8150
+ "step": 8600
8151
+ },
8152
+ {
8153
+ "epoch": 2.72,
8154
+ "eval_loss": 0.7557567358016968,
8155
+ "eval_runtime": 111.5506,
8156
+ "eval_samples_per_second": 8.965,
8157
+ "eval_steps_per_second": 4.482,
8158
+ "step": 8600
8159
+ },
8160
+ {
8161
+ "epoch": 2.72,
8162
+ "mmlu_eval_accuracy": 0.4768331170577644,
8163
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8164
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
8165
+ "mmlu_eval_accuracy_astronomy": 0.4375,
8166
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8167
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
8168
+ "mmlu_eval_accuracy_college_biology": 0.375,
8169
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
8170
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8171
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8172
+ "mmlu_eval_accuracy_college_medicine": 0.5,
8173
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8174
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
8175
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8176
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
8177
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
8178
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
8179
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
8180
+ "mmlu_eval_accuracy_global_facts": 0.3,
8181
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
8182
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8183
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8184
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
8185
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
8186
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8187
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
8188
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
8189
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
8190
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8191
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
8192
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8193
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8194
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8195
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8196
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8197
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8198
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8199
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8200
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
8201
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8202
+ "mmlu_eval_accuracy_marketing": 0.76,
8203
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8204
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8205
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
8206
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
8207
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8208
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
8209
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
8210
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
8211
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
8212
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8213
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
8214
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8215
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
8216
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8217
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8218
+ "mmlu_eval_accuracy_virology": 0.5,
8219
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8220
+ "mmlu_loss": 1.4071306704152036,
8221
+ "step": 8600
8222
+ },
8223
+ {
8224
+ "epoch": 2.73,
8225
+ "learning_rate": 0.0002,
8226
+ "loss": 0.6607,
8227
+ "step": 8610
8228
+ },
8229
+ {
8230
+ "epoch": 2.73,
8231
+ "learning_rate": 0.0002,
8232
+ "loss": 0.6087,
8233
+ "step": 8620
8234
+ },
8235
+ {
8236
+ "epoch": 2.73,
8237
+ "learning_rate": 0.0002,
8238
+ "loss": 0.6077,
8239
+ "step": 8630
8240
+ },
8241
+ {
8242
+ "epoch": 2.74,
8243
+ "learning_rate": 0.0002,
8244
+ "loss": 0.5746,
8245
+ "step": 8640
8246
+ },
8247
+ {
8248
+ "epoch": 2.74,
8249
+ "learning_rate": 0.0002,
8250
+ "loss": 0.6372,
8251
+ "step": 8650
8252
+ },
8253
+ {
8254
+ "epoch": 2.74,
8255
+ "learning_rate": 0.0002,
8256
+ "loss": 0.603,
8257
+ "step": 8660
8258
+ },
8259
+ {
8260
+ "epoch": 2.75,
8261
+ "learning_rate": 0.0002,
8262
+ "loss": 0.5913,
8263
+ "step": 8670
8264
+ },
8265
+ {
8266
+ "epoch": 2.75,
8267
+ "learning_rate": 0.0002,
8268
+ "loss": 0.664,
8269
+ "step": 8680
8270
+ },
8271
+ {
8272
+ "epoch": 2.75,
8273
+ "learning_rate": 0.0002,
8274
+ "loss": 0.5766,
8275
+ "step": 8690
8276
+ },
8277
+ {
8278
+ "epoch": 2.76,
8279
+ "learning_rate": 0.0002,
8280
+ "loss": 0.6316,
8281
+ "step": 8700
8282
+ },
8283
+ {
8284
+ "epoch": 2.76,
8285
+ "learning_rate": 0.0002,
8286
+ "loss": 0.5913,
8287
+ "step": 8710
8288
+ },
8289
+ {
8290
+ "epoch": 2.76,
8291
+ "learning_rate": 0.0002,
8292
+ "loss": 0.5974,
8293
+ "step": 8720
8294
+ },
8295
+ {
8296
+ "epoch": 2.77,
8297
+ "learning_rate": 0.0002,
8298
+ "loss": 0.6519,
8299
+ "step": 8730
8300
+ },
8301
+ {
8302
+ "epoch": 2.77,
8303
+ "learning_rate": 0.0002,
8304
+ "loss": 0.6502,
8305
+ "step": 8740
8306
+ },
8307
+ {
8308
+ "epoch": 2.77,
8309
+ "learning_rate": 0.0002,
8310
+ "loss": 0.6069,
8311
+ "step": 8750
8312
+ },
8313
+ {
8314
+ "epoch": 2.78,
8315
+ "learning_rate": 0.0002,
8316
+ "loss": 0.6073,
8317
+ "step": 8760
8318
+ },
8319
+ {
8320
+ "epoch": 2.78,
8321
+ "learning_rate": 0.0002,
8322
+ "loss": 0.5314,
8323
+ "step": 8770
8324
+ },
8325
+ {
8326
+ "epoch": 2.78,
8327
+ "learning_rate": 0.0002,
8328
+ "loss": 0.6515,
8329
+ "step": 8780
8330
+ },
8331
+ {
8332
+ "epoch": 2.79,
8333
+ "learning_rate": 0.0002,
8334
+ "loss": 0.6515,
8335
+ "step": 8790
8336
+ },
8337
+ {
8338
+ "epoch": 2.79,
8339
+ "learning_rate": 0.0002,
8340
+ "loss": 0.6188,
8341
+ "step": 8800
8342
+ },
8343
+ {
8344
+ "epoch": 2.79,
8345
+ "eval_loss": 0.7575909495353699,
8346
+ "eval_runtime": 111.2733,
8347
+ "eval_samples_per_second": 8.987,
8348
+ "eval_steps_per_second": 4.493,
8349
+ "step": 8800
8350
+ },
8351
+ {
8352
+ "epoch": 2.79,
8353
+ "mmlu_eval_accuracy": 0.489029874622582,
8354
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8355
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
8356
+ "mmlu_eval_accuracy_astronomy": 0.375,
8357
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
8358
+ "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
8359
+ "mmlu_eval_accuracy_college_biology": 0.375,
8360
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
8361
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8362
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8363
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
8364
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
8365
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
8366
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8367
+ "mmlu_eval_accuracy_econometrics": 0.25,
8368
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
8369
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
8370
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
8371
+ "mmlu_eval_accuracy_global_facts": 0.5,
8372
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
8373
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8374
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8375
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
8376
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
8377
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8378
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
8379
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8380
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
8381
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8382
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
8383
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8384
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
8385
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
8386
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8387
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8388
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8389
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8390
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
8391
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
8392
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8393
+ "mmlu_eval_accuracy_marketing": 0.68,
8394
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8395
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8396
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
8397
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
8398
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
8399
+ "mmlu_eval_accuracy_philosophy": 0.5,
8400
+ "mmlu_eval_accuracy_prehistory": 0.6285714285714286,
8401
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8402
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
8403
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8404
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
8405
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8406
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8407
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8408
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8409
+ "mmlu_eval_accuracy_virology": 0.5,
8410
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8411
+ "mmlu_loss": 1.3448165364505105,
8412
+ "step": 8800
8413
  }
8414
  ],
8415
  "max_steps": 10000,
8416
  "num_train_epochs": 4,
8417
+ "total_flos": 2.672155898548568e+18,
8418
  "trial_name": null,
8419
  "trial_params": null
8420
  }
{checkpoint-6800 → checkpoint-8800}/training_args.bin RENAMED
File without changes
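
The JSON diffed above appears to be the Trainer's `trainer_state.json` for checkpoint-8800; each record shown is one entry of its `log_history` list (training loss every 10 steps, eval and MMLU metrics every 200 steps). As a minimal sketch, not part of this commit, of how those logged curves can be read back out, assuming the checkpoint has been downloaded and the local path below exists:

```python
# Minimal sketch: pull the step-wise metrics out of trainer_state.json.
# The path is an assumption; point it at wherever the checkpoint lives locally.
import json

with open("checkpoint-8800/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Training records carry "loss", eval records "eval_loss",
# MMLU records "mmlu_eval_accuracy", so key presence separates them.
train_loss = [(r["step"], r["loss"]) for r in history if "loss" in r]
eval_loss  = [(r["step"], r["eval_loss"]) for r in history if "eval_loss" in r]
mmlu_acc   = [(r["step"], r["mmlu_eval_accuracy"]) for r in history if "mmlu_eval_accuracy" in r]

print(train_loss[-1])  # last logged training loss, e.g. (8800, 0.6188)
print(eval_loss[-1])   # e.g. (8800, 0.7575909495353699)
print(mmlu_acc[-1])    # e.g. (8800, 0.489029874622582)
```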