ihanif committed
Commit 45e0bc3 · verified · 1 Parent(s): 1a288ea

Training in progress, step 200, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e899319e2a6deced8832230fdfd05f5866c1c499db7790d3cc8a3fd23431bebd
+oid sha256:fca209fbf14bb03b14596a4a7a7e641686e5c741ee80a8d63177163d98de4032
 size 290403936
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f52777292bc4955b21021034bd356919653befec6b7cbe5b883450548ad7558
+oid sha256:88bd5955ffa0d6a8ab77c320b1bac1d2b8c25c3ecdfa9fac75f30e17a28c7795
 size 574811514
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9b1d6fb48751f5d2b47d7f0ba3ae8e90c30c9802e10d676d0257941f38d0753
+oid sha256:3b6321d2d69b9a8969ff838f442e78895a976e15f45834ce2fec948fc2a5b7ab
 size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46cb3a7cf266128ea12b82e6f01a8b22cd7a573c098bba1dca18d6b9fda3121d
+oid sha256:401546997e9540c36b01d5a34535bd43dfe01519d1e848bfb7825d3197c47e9d
 size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5065b6b7372b99dc75b33ee3c07c8e8f0697cdb322a117e8f7109e80a71e97cf
+oid sha256:f612818c5fb8e8465abfb32d1a77c014d6d3622c9188f348caa044647cbde875
 size 1064
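
Each of the checkpoint files above is tracked with Git LFS, so the commit only rewrites the small pointer file: the oid sha256 line switches to the new blob while the size stays unchanged. As a minimal sketch (assuming the checkpoint has been pulled locally so that a path like last-checkpoint/model.safetensors exists; the path and helper name are illustrative, not part of this repo), a downloaded file can be checked against the pointer's oid with the standard library:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so large checkpoint blobs need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated pointer in this commit.
expected = "fca209fbf14bb03b14596a4a7a7e641686e5c741ee80a8d63177163d98de4032"
# Hypothetical local path; adjust to wherever the checkpoint was downloaded.
actual = sha256_of("last-checkpoint/model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")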
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": 100,
   "best_metric": 26.867363647814585,
   "best_model_checkpoint": "whisper-base-synth-v1/checkpoint-100",
-  "epoch": 1.0638297872340425,
+  "epoch": 2.127659574468085,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -46,6 +46,44 @@
       "eval_wer": 26.867363647814585,
       "eval_wer_ortho": 29.74588938714499,
       "step": 100
+    },
+    {
+      "epoch": 1.3297872340425532,
+      "grad_norm": 4.06046724319458,
+      "learning_rate": 6.489361702127659e-06,
+      "loss": 4.6403,
+      "step": 125
+    },
+    {
+      "epoch": 1.5957446808510638,
+      "grad_norm": 4.466408729553223,
+      "learning_rate": 7.819148936170213e-06,
+      "loss": 4.4984,
+      "step": 150
+    },
+    {
+      "epoch": 1.8617021276595744,
+      "grad_norm": 4.118725776672363,
+      "learning_rate": 9.148936170212767e-06,
+      "loss": 4.3114,
+      "step": 175
+    },
+    {
+      "epoch": 2.127659574468085,
+      "grad_norm": 3.9775619506835938,
+      "learning_rate": 1.047872340425532e-05,
+      "loss": 4.0801,
+      "step": 200
+    },
+    {
+      "epoch": 2.127659574468085,
+      "eval_loss": 0.42653512954711914,
+      "eval_runtime": 777.2367,
+      "eval_samples_per_second": 3.604,
+      "eval_steps_per_second": 0.057,
+      "eval_wer": 82.38653062938238,
+      "eval_wer_ortho": 79.16874273376516,
+      "step": 200
     }
   ],
   "logging_steps": 25,
@@ -65,7 +103,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.1406621548544e+17,
+  "total_flos": 8.2813243097088e+17,
   "train_batch_size": 64,
   "trial_name": null,
   "trial_params": null