csikasote committed
Commit 0e7d24a · verified · 1 Parent(s): d47ab15

End of training

README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
 base_model: openai/whisper-medium
 tags:
 - generated_from_trainer
+datasets:
+- bigcgen
 metrics:
 - wer
 model-index:
 - name: whisper-medium-bigcgen-male-5hrs-62
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: bigcgen
+      type: bigcgen
+    metrics:
+    - name: Wer
+      type: wer
+      value: 0.516229862947824
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-medium-bigcgen-male-5hrs-62
 
-This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the bigcgen dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7554
-- Wer: 0.4879
+- Loss: 0.6629
+- Wer: 0.5162
 
 ## Model description
 
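The updated card now points at the best checkpoint (eval loss 0.6629, WER 0.5162 on the bigcgen evaluation split). Below is a minimal usage sketch through the standard transformers ASR pipeline; the repository id `csikasote/whisper-medium-bigcgen-male-5hrs-62` and the audio filename are placeholders inferred from this commit, not confirmed by it.

```python
# Minimal sketch: transcribe one audio file with the fine-tuned checkpoint.
# The model id and audio path below are placeholders, not taken from this commit.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-bigcgen-male-5hrs-62",
    chunk_length_s=30,  # Whisper operates on 30-second windows
)

print(asr("sample.wav")["text"])
```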
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+  "epoch": 4.3478260869565215,
+  "eval_loss": 0.6628760695457458,
+  "eval_runtime": 266.4169,
+  "eval_samples": 441,
+  "eval_samples_per_second": 1.655,
+  "eval_steps_per_second": 0.83,
+  "eval_wer": 0.516229862947824,
+  "total_flos": 1.1430774964224e+19,
+  "train_loss": 0.6237195907320295,
+  "train_runtime": 4683.4552,
+  "train_samples": 2576,
+  "train_samples_per_second": 8.541,
+  "train_steps_per_second": 1.068
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 4.3478260869565215,
+  "eval_loss": 0.6628760695457458,
+  "eval_runtime": 266.4169,
+  "eval_samples": 441,
+  "eval_samples_per_second": 1.655,
+  "eval_steps_per_second": 0.83,
+  "eval_wer": 0.516229862947824
+}
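The `eval_wer` value above is a corpus-level word error rate over the 441 evaluation samples. A minimal sketch of how such a score is computed with the Hugging Face `evaluate` library follows; the reference and prediction strings are invented placeholders, not bigcgen transcripts.

```python
# Minimal sketch: corpus-level WER, the metric reported as eval_wer above.
# The reference/prediction pairs are invented placeholders, not bigcgen data.
import evaluate

wer = evaluate.load("wer")

references = ["the quick brown fox jumps", "over the lazy dog"]
predictions = ["the quick brown fox jump", "over the lazy"]

# WER = (substitutions + deletions + insertions) / total reference words
score = wer.compute(references=references, predictions=predictions)
print(f"WER: {score:.4f}")
```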
runs/Aug09_11-00-51_srvrocgpu011.uct.ac.za/events.out.tfevents.1754735181.srvrocgpu011.uct.ac.za.3072232.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b156c72417466b88b97c1c6235d545b6c46a992bcb1bcf4ca8c2054c21a2ad1
+size 406
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 4.3478260869565215,
+  "total_flos": 1.1430774964224e+19,
+  "train_loss": 0.6237195907320295,
+  "train_runtime": 4683.4552,
+  "train_samples": 2576,
+  "train_samples_per_second": 8.541,
+  "train_steps_per_second": 1.068
+}
trainer_state.json ADDED
@@ -0,0 +1,507 @@
+{
+  "best_global_step": 600,
+  "best_metric": 0.6628760695457458,
+  "best_model_checkpoint": "/scratch/skscla001/experiments/datasets/results/whisper-medium-bigcgen-male-5hrs-62/checkpoint-600",
+  "epoch": 4.3478260869565215,
+  "eval_steps": 200,
+  "global_step": 1400,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.07763975155279502,
+      "grad_norm": 33.341270446777344,
+      "learning_rate": 4.0000000000000003e-07,
+      "loss": 3.3362,
+      "step": 25
+    },
+    {
+      "epoch": 0.15527950310559005,
+      "grad_norm": 26.828624725341797,
+      "learning_rate": 9.000000000000001e-07,
+      "loss": 2.6418,
+      "step": 50
+    },
+    {
+      "epoch": 0.2329192546583851,
+      "grad_norm": 25.673059463500977,
+      "learning_rate": 1.4000000000000001e-06,
+      "loss": 2.017,
+      "step": 75
+    },
+    {
+      "epoch": 0.3105590062111801,
+      "grad_norm": 17.314512252807617,
+      "learning_rate": 1.9000000000000002e-06,
+      "loss": 1.4498,
+      "step": 100
+    },
+    {
+      "epoch": 0.38819875776397517,
+      "grad_norm": 17.275650024414062,
+      "learning_rate": 2.4000000000000003e-06,
+      "loss": 1.2023,
+      "step": 125
+    },
+    {
+      "epoch": 0.4658385093167702,
+      "grad_norm": 17.702608108520508,
+      "learning_rate": 2.9e-06,
+      "loss": 1.0835,
+      "step": 150
+    },
+    {
+      "epoch": 0.5434782608695652,
+      "grad_norm": 15.095105171203613,
+      "learning_rate": 3.4000000000000005e-06,
+      "loss": 1.1593,
+      "step": 175
+    },
+    {
+      "epoch": 0.6211180124223602,
+      "grad_norm": 16.568078994750977,
+      "learning_rate": 3.900000000000001e-06,
+      "loss": 0.956,
+      "step": 200
+    },
+    {
+      "epoch": 0.6211180124223602,
+      "eval_loss": 0.8870997428894043,
+      "eval_runtime": 276.5554,
+      "eval_samples_per_second": 1.595,
+      "eval_steps_per_second": 0.799,
+      "eval_wer": 0.6443856696321231,
+      "step": 200
+    },
+    {
+      "epoch": 0.6987577639751553,
+      "grad_norm": 17.126779556274414,
+      "learning_rate": 4.4e-06,
+      "loss": 1.0133,
+      "step": 225
+    },
+    {
+      "epoch": 0.7763975155279503,
+      "grad_norm": 15.565762519836426,
+      "learning_rate": 4.9000000000000005e-06,
+      "loss": 1.0108,
+      "step": 250
+    },
+    {
+      "epoch": 0.8540372670807453,
+      "grad_norm": 19.874563217163086,
+      "learning_rate": 5.400000000000001e-06,
+      "loss": 0.792,
+      "step": 275
+    },
+    {
+      "epoch": 0.9316770186335404,
+      "grad_norm": 13.371776580810547,
+      "learning_rate": 5.9e-06,
+      "loss": 0.7788,
+      "step": 300
+    },
+    {
+      "epoch": 1.0093167701863355,
+      "grad_norm": 13.305375099182129,
+      "learning_rate": 6.4000000000000006e-06,
+      "loss": 0.8068,
+      "step": 325
+    },
+    {
+      "epoch": 1.0869565217391304,
+      "grad_norm": 16.924894332885742,
+      "learning_rate": 6.9e-06,
+      "loss": 0.5933,
+      "step": 350
+    },
+    {
+      "epoch": 1.1645962732919255,
+      "grad_norm": 13.33420467376709,
+      "learning_rate": 7.4e-06,
+      "loss": 0.7016,
+      "step": 375
+    },
+    {
+      "epoch": 1.2422360248447206,
+      "grad_norm": 15.580875396728516,
+      "learning_rate": 7.9e-06,
+      "loss": 0.6779,
+      "step": 400
+    },
+    {
+      "epoch": 1.2422360248447206,
+      "eval_loss": 0.7327317595481873,
+      "eval_runtime": 265.8594,
+      "eval_samples_per_second": 1.659,
+      "eval_steps_per_second": 0.831,
+      "eval_wer": 0.5585477278191873,
+      "step": 400
+    },
+    {
+      "epoch": 1.3198757763975155,
+      "grad_norm": 14.909109115600586,
+      "learning_rate": 8.400000000000001e-06,
+      "loss": 0.6421,
+      "step": 425
+    },
+    {
+      "epoch": 1.3975155279503104,
+      "grad_norm": 15.768739700317383,
+      "learning_rate": 8.900000000000001e-06,
+      "loss": 0.6492,
+      "step": 450
+    },
+    {
+      "epoch": 1.4751552795031055,
+      "grad_norm": 10.104466438293457,
+      "learning_rate": 9.4e-06,
+      "loss": 0.6439,
+      "step": 475
+    },
+    {
+      "epoch": 1.5527950310559007,
+      "grad_norm": 12.118277549743652,
+      "learning_rate": 9.9e-06,
+      "loss": 0.6893,
+      "step": 500
+    },
+    {
+      "epoch": 1.6304347826086958,
+      "grad_norm": 11.13495922088623,
+      "learning_rate": 9.955555555555556e-06,
+      "loss": 0.6307,
+      "step": 525
+    },
+    {
+      "epoch": 1.7080745341614907,
+      "grad_norm": 12.645089149475098,
+      "learning_rate": 9.9e-06,
+      "loss": 0.603,
+      "step": 550
+    },
+    {
+      "epoch": 1.7857142857142856,
+      "grad_norm": 17.46633529663086,
+      "learning_rate": 9.844444444444446e-06,
+      "loss": 0.657,
+      "step": 575
+    },
+    {
+      "epoch": 1.8633540372670807,
+      "grad_norm": 8.319685935974121,
+      "learning_rate": 9.78888888888889e-06,
+      "loss": 0.5879,
+      "step": 600
+    },
+    {
+      "epoch": 1.8633540372670807,
+      "eval_loss": 0.6628760695457458,
+      "eval_runtime": 270.2431,
+      "eval_samples_per_second": 1.632,
+      "eval_steps_per_second": 0.818,
+      "eval_wer": 0.516229862947824,
+      "step": 600
+    },
+    {
+      "epoch": 1.9409937888198758,
+      "grad_norm": 11.236241340637207,
+      "learning_rate": 9.733333333333334e-06,
+      "loss": 0.6359,
+      "step": 625
+    },
+    {
+      "epoch": 2.018633540372671,
+      "grad_norm": 13.337384223937988,
+      "learning_rate": 9.677777777777778e-06,
+      "loss": 0.5992,
+      "step": 650
+    },
+    {
+      "epoch": 2.0962732919254656,
+      "grad_norm": 8.325053215026855,
+      "learning_rate": 9.622222222222222e-06,
+      "loss": 0.3897,
+      "step": 675
+    },
+    {
+      "epoch": 2.1739130434782608,
+      "grad_norm": 12.123583793640137,
+      "learning_rate": 9.566666666666668e-06,
+      "loss": 0.4313,
+      "step": 700
+    },
+    {
+      "epoch": 2.251552795031056,
+      "grad_norm": 13.106657028198242,
+      "learning_rate": 9.511111111111112e-06,
+      "loss": 0.3598,
+      "step": 725
+    },
+    {
+      "epoch": 2.329192546583851,
+      "grad_norm": 8.855884552001953,
+      "learning_rate": 9.455555555555557e-06,
+      "loss": 0.4473,
+      "step": 750
+    },
+    {
+      "epoch": 2.406832298136646,
+      "grad_norm": 8.3619966506958,
+      "learning_rate": 9.4e-06,
+      "loss": 0.3956,
+      "step": 775
+    },
+    {
+      "epoch": 2.4844720496894412,
+      "grad_norm": 9.356673240661621,
+      "learning_rate": 9.344444444444446e-06,
+      "loss": 0.4268,
+      "step": 800
+    },
+    {
+      "epoch": 2.4844720496894412,
+      "eval_loss": 0.6734069585800171,
+      "eval_runtime": 275.7178,
+      "eval_samples_per_second": 1.599,
+      "eval_steps_per_second": 0.802,
+      "eval_wer": 0.5006011060351045,
+      "step": 800
+    },
+    {
+      "epoch": 2.562111801242236,
+      "grad_norm": 6.666379928588867,
+      "learning_rate": 9.28888888888889e-06,
+      "loss": 0.389,
+      "step": 825
+    },
+    {
+      "epoch": 2.639751552795031,
+      "grad_norm": 11.296941757202148,
+      "learning_rate": 9.233333333333334e-06,
+      "loss": 0.3558,
+      "step": 850
+    },
+    {
+      "epoch": 2.717391304347826,
+      "grad_norm": 14.280022621154785,
+      "learning_rate": 9.17777777777778e-06,
+      "loss": 0.4083,
+      "step": 875
+    },
+    {
+      "epoch": 2.795031055900621,
+      "grad_norm": 11.778422355651855,
+      "learning_rate": 9.122222222222223e-06,
+      "loss": 0.4209,
+      "step": 900
+    },
+    {
+      "epoch": 2.8726708074534164,
+      "grad_norm": 8.68419075012207,
+      "learning_rate": 9.066666666666667e-06,
+      "loss": 0.3953,
+      "step": 925
+    },
+    {
+      "epoch": 2.950310559006211,
+      "grad_norm": 15.859999656677246,
+      "learning_rate": 9.011111111111111e-06,
+      "loss": 0.3805,
+      "step": 950
+    },
+    {
+      "epoch": 3.027950310559006,
+      "grad_norm": 6.30320930480957,
+      "learning_rate": 8.955555555555555e-06,
+      "loss": 0.3059,
+      "step": 975
+    },
+    {
+      "epoch": 3.1055900621118013,
+      "grad_norm": 6.796067714691162,
+      "learning_rate": 8.900000000000001e-06,
+      "loss": 0.1979,
+      "step": 1000
+    },
+    {
+      "epoch": 3.1055900621118013,
+      "eval_loss": 0.7146615982055664,
+      "eval_runtime": 265.096,
+      "eval_samples_per_second": 1.664,
+      "eval_steps_per_second": 0.834,
+      "eval_wer": 0.4765568646309209,
+      "step": 1000
+    },
+    {
+      "epoch": 3.1832298136645965,
+      "grad_norm": 6.765524864196777,
+      "learning_rate": 8.844444444444445e-06,
+      "loss": 0.2458,
+      "step": 1025
+    },
+    {
+      "epoch": 3.260869565217391,
+      "grad_norm": 6.841522693634033,
+      "learning_rate": 8.788888888888891e-06,
+      "loss": 0.2144,
+      "step": 1050
+    },
+    {
+      "epoch": 3.3385093167701863,
+      "grad_norm": 7.854112148284912,
+      "learning_rate": 8.733333333333333e-06,
+      "loss": 0.2255,
+      "step": 1075
+    },
+    {
+      "epoch": 3.4161490683229814,
+      "grad_norm": 6.754302501678467,
+      "learning_rate": 8.677777777777779e-06,
+      "loss": 0.2295,
+      "step": 1100
+    },
+    {
+      "epoch": 3.4937888198757765,
+      "grad_norm": 10.24082088470459,
+      "learning_rate": 8.622222222222223e-06,
+      "loss": 0.214,
+      "step": 1125
+    },
+    {
+      "epoch": 3.571428571428571,
+      "grad_norm": 8.713047981262207,
+      "learning_rate": 8.566666666666667e-06,
+      "loss": 0.2529,
+      "step": 1150
+    },
+    {
+      "epoch": 3.6490683229813663,
+      "grad_norm": 10.281318664550781,
+      "learning_rate": 8.511111111111113e-06,
+      "loss": 0.2617,
+      "step": 1175
+    },
+    {
+      "epoch": 3.7267080745341614,
+      "grad_norm": 9.869316101074219,
+      "learning_rate": 8.455555555555555e-06,
+      "loss": 0.2338,
+      "step": 1200
+    },
+    {
+      "epoch": 3.7267080745341614,
+      "eval_loss": 0.7034153938293457,
+      "eval_runtime": 264.3393,
+      "eval_samples_per_second": 1.668,
+      "eval_steps_per_second": 0.836,
+      "eval_wer": 0.48689588843471987,
+      "step": 1200
+    },
+    {
+      "epoch": 3.8043478260869565,
+      "grad_norm": 4.6300835609436035,
+      "learning_rate": 8.400000000000001e-06,
+      "loss": 0.2299,
+      "step": 1225
+    },
+    {
+      "epoch": 3.8819875776397517,
+      "grad_norm": 6.275107383728027,
+      "learning_rate": 8.344444444444445e-06,
+      "loss": 0.1803,
+      "step": 1250
+    },
+    {
+      "epoch": 3.9596273291925463,
+      "grad_norm": 7.057643413543701,
+      "learning_rate": 8.288888888888889e-06,
+      "loss": 0.2024,
+      "step": 1275
+    },
+    {
+      "epoch": 4.037267080745342,
+      "grad_norm": 7.044947147369385,
+      "learning_rate": 8.233333333333335e-06,
+      "loss": 0.1694,
+      "step": 1300
+    },
+    {
+      "epoch": 4.114906832298137,
+      "grad_norm": 5.649941921234131,
+      "learning_rate": 8.177777777777779e-06,
+      "loss": 0.1026,
+      "step": 1325
+    },
+    {
+      "epoch": 4.192546583850931,
+      "grad_norm": 6.779381275177002,
+      "learning_rate": 8.122222222222223e-06,
+      "loss": 0.1207,
+      "step": 1350
+    },
+    {
+      "epoch": 4.270186335403727,
+      "grad_norm": 4.534013271331787,
+      "learning_rate": 8.066666666666667e-06,
+      "loss": 0.081,
+      "step": 1375
+    },
+    {
+      "epoch": 4.3478260869565215,
+      "grad_norm": 10.306833267211914,
+      "learning_rate": 8.011111111111113e-06,
+      "loss": 0.1014,
+      "step": 1400
+    },
+    {
+      "epoch": 4.3478260869565215,
+      "eval_loss": 0.7553574442863464,
+      "eval_runtime": 263.3312,
+      "eval_samples_per_second": 1.675,
+      "eval_steps_per_second": 0.839,
+      "eval_wer": 0.4878576580908872,
+      "step": 1400
+    },
+    {
+      "epoch": 4.3478260869565215,
+      "step": 1400,
+      "total_flos": 1.1430774964224e+19,
+      "train_loss": 0.6237195907320295,
+      "train_runtime": 4683.4552,
+      "train_samples_per_second": 8.541,
+      "train_steps_per_second": 1.068
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 5000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 16,
+  "save_steps": 200,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 4,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 4
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.1430774964224e+19,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
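Training stopped at step 1400 of a configured maximum of 5000: after the best evaluation loss of 0.6629 at step 600, four consecutive evaluations (steps 800 through 1400) failed to improve it, exhausting the EarlyStoppingCallback patience of 4. A small sketch for recovering the best checkpoint and the WER curve from this file is given below; the relative file path is an assumption.

```python
# Minimal sketch: inspect trainer_state.json for the best checkpoint and
# the evaluation WER curve. The file path is a placeholder.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_model_checkpoint"])  # .../checkpoint-600
print(state["best_metric"])            # 0.6628760695457458 (eval_loss)

# (step, eval_wer) pairs logged at each evaluation
wer_curve = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]
print(wer_curve)
```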