lerobot_v2_ball010-5e0cj6ub5l / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.807692307692307,
"eval_steps": 500,
"global_step": 515,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.38461538461538464,
"grad_norm": 3.364804983139038,
"learning_rate": 7.692307692307693e-05,
"loss": 1.4034,
"step": 10
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.2244789600372314,
"learning_rate": 0.00015384615384615385,
"loss": 0.4222,
"step": 20
},
{
"epoch": 1.1538461538461537,
"grad_norm": 1.535311222076416,
"learning_rate": 0.00019996698220143899,
"loss": 0.2313,
"step": 30
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.9641413688659668,
"learning_rate": 0.0001995957823217325,
"loss": 0.1714,
"step": 40
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.1909210681915283,
"learning_rate": 0.00019881364700727823,
"loss": 0.148,
"step": 50
},
{
"epoch": 2.3076923076923075,
"grad_norm": 0.9179659485816956,
"learning_rate": 0.0001976238033738033,
"loss": 0.1358,
"step": 60
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.524248719215393,
"learning_rate": 0.00019603116075480955,
"loss": 0.1366,
"step": 70
},
{
"epoch": 3.076923076923077,
"grad_norm": 0.7116435766220093,
"learning_rate": 0.00019404229044550433,
"loss": 0.1132,
"step": 80
},
{
"epoch": 3.4615384615384617,
"grad_norm": 0.4233417510986328,
"learning_rate": 0.00019166539858942255,
"loss": 0.1099,
"step": 90
},
{
"epoch": 3.8461538461538463,
"grad_norm": 0.3895157277584076,
"learning_rate": 0.00018891029231961207,
"loss": 0.0973,
"step": 100
},
{
"epoch": 4.230769230769231,
"grad_norm": 0.6012492179870605,
"learning_rate": 0.0001857883392940837,
"loss": 0.0974,
"step": 110
},
{
"epoch": 4.615384615384615,
"grad_norm": 0.4354020059108734,
"learning_rate": 0.00018231242079248512,
"loss": 0.0853,
"step": 120
},
{
"epoch": 5.0,
"grad_norm": 0.6374463438987732,
"learning_rate": 0.00017849687856752208,
"loss": 0.0814,
"step": 130
},
{
"epoch": 5.384615384615385,
"grad_norm": 0.4828743636608124,
"learning_rate": 0.00017435745567042095,
"loss": 0.083,
"step": 140
},
{
"epoch": 5.769230769230769,
"grad_norm": 0.2791459560394287,
"learning_rate": 0.00016991123149458738,
"loss": 0.0769,
"step": 150
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.3161262273788452,
"learning_rate": 0.00016517655130547437,
"loss": 0.0819,
"step": 160
},
{
"epoch": 6.538461538461538,
"grad_norm": 0.5798579454421997,
"learning_rate": 0.00016017295054742046,
"loss": 0.0688,
"step": 170
},
{
"epoch": 6.923076923076923,
"grad_norm": 0.38132503628730774,
"learning_rate": 0.00015492107423977167,
"loss": 0.0618,
"step": 180
},
{
"epoch": 7.3076923076923075,
"grad_norm": 0.4480208158493042,
"learning_rate": 0.00014944259179486066,
"loss": 0.0678,
"step": 190
},
{
"epoch": 7.6923076923076925,
"grad_norm": 0.3681942820549011,
"learning_rate": 0.00014376010760930728,
"loss": 0.0629,
"step": 200
},
{
"epoch": 8.076923076923077,
"grad_norm": 0.5002759695053101,
"learning_rate": 0.00013789706779754327,
"loss": 0.0655,
"step": 210
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.4140470325946808,
"learning_rate": 0.00013187766345238222,
"loss": 0.0623,
"step": 220
},
{
"epoch": 8.846153846153847,
"grad_norm": 0.37125447392463684,
"learning_rate": 0.0001257267308317845,
"loss": 0.0584,
"step": 230
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.3931460976600647,
"learning_rate": 0.0001194696488836495,
"loss": 0.0615,
"step": 240
},
{
"epoch": 9.615384615384615,
"grad_norm": 0.4081181287765503,
"learning_rate": 0.00011313223453145201,
"loss": 0.0541,
"step": 250
},
{
"epoch": 10.0,
"grad_norm": 0.42426028847694397,
"learning_rate": 0.0001067406361527768,
"loss": 0.0591,
"step": 260
},
{
"epoch": 10.384615384615385,
"grad_norm": 0.389825701713562,
"learning_rate": 0.00010032122569026283,
"loss": 0.0565,
"step": 270
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.4282422363758087,
"learning_rate": 9.390048984011096e-05,
"loss": 0.0492,
"step": 280
},
{
"epoch": 11.153846153846153,
"grad_norm": 0.33358335494995117,
"learning_rate": 8.750492076711439e-05,
"loss": 0.0462,
"step": 290
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.3536369204521179,
"learning_rate": 8.1160906797126e-05,
"loss": 0.0547,
"step": 300
},
{
"epoch": 11.923076923076923,
"grad_norm": 0.3393729329109192,
"learning_rate": 7.489462353796793e-05,
"loss": 0.0539,
"step": 310
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.26623183488845825,
"learning_rate": 6.87319258780234e-05,
"loss": 0.04,
"step": 320
},
{
"epoch": 12.692307692307692,
"grad_norm": 0.34395718574523926,
"learning_rate": 6.269824130812645e-05,
"loss": 0.0493,
"step": 330
},
{
"epoch": 13.076923076923077,
"grad_norm": 0.2601974904537201,
"learning_rate": 5.681846500690884e-05,
"loss": 0.054,
"step": 340
},
{
"epoch": 13.461538461538462,
"grad_norm": 0.20003391802310944,
"learning_rate": 5.111685712248364e-05,
"loss": 0.0489,
"step": 350
},
{
"epoch": 13.846153846153847,
"grad_norm": 0.20917710661888123,
"learning_rate": 4.56169426742856e-05,
"loss": 0.0536,
"step": 360
},
{
"epoch": 14.23076923076923,
"grad_norm": 0.2096697986125946,
"learning_rate": 4.03414144880767e-05,
"loss": 0.0436,
"step": 370
},
{
"epoch": 14.615384615384615,
"grad_norm": 0.2338918149471283,
"learning_rate": 3.53120395646092e-05,
"loss": 0.0413,
"step": 380
},
{
"epoch": 15.0,
"grad_norm": 0.2506970465183258,
"learning_rate": 3.054956926827332e-05,
"loss": 0.0418,
"step": 390
},
{
"epoch": 15.384615384615385,
"grad_norm": 0.16216574609279633,
"learning_rate": 2.60736537062932e-05,
"loss": 0.0346,
"step": 400
},
{
"epoch": 15.76923076923077,
"grad_norm": 0.26421189308166504,
"learning_rate": 2.1902760651745958e-05,
"loss": 0.0457,
"step": 410
},
{
"epoch": 16.153846153846153,
"grad_norm": 0.2853449881076813,
"learning_rate": 1.8054099344929832e-05,
"loss": 0.0432,
"step": 420
},
{
"epoch": 16.53846153846154,
"grad_norm": 0.2742384672164917,
"learning_rate": 1.454354948747909e-05,
"loss": 0.0411,
"step": 430
},
{
"epoch": 16.923076923076923,
"grad_norm": 0.2132396250963211,
"learning_rate": 1.1385595722199438e-05,
"loss": 0.0372,
"step": 440
},
{
"epoch": 17.307692307692307,
"grad_norm": 0.23929736018180847,
"learning_rate": 8.593267868960675e-06,
"loss": 0.0336,
"step": 450
},
{
"epoch": 17.692307692307693,
"grad_norm": 0.16594846546649933,
"learning_rate": 6.178087163236645e-06,
"loss": 0.0383,
"step": 460
},
{
"epoch": 18.076923076923077,
"grad_norm": 0.18442106246948242,
"learning_rate": 4.1500187191131466e-06,
"loss": 0.0361,
"step": 470
},
{
"epoch": 18.46153846153846,
"grad_norm": 0.20087499916553497,
"learning_rate": 2.5174304129033653e-06,
"loss": 0.036,
"step": 480
},
{
"epoch": 18.846153846153847,
"grad_norm": 0.24509386718273163,
"learning_rate": 1.287058357018278e-06,
"loss": 0.0335,
"step": 490
},
{
"epoch": 19.23076923076923,
"grad_norm": 0.22717131674289703,
"learning_rate": 4.639791065478738e-07,
"loss": 0.0381,
"step": 500
},
{
"epoch": 19.615384615384617,
"grad_norm": 0.14815658330917358,
"learning_rate": 5.158871322984427e-08,
"loss": 0.0374,
"step": 510
},
{
"epoch": 19.807692307692307,
"step": 515,
"total_flos": 7.15348914538104e+16,
"train_loss": 0.10100355527354675,
"train_runtime": 643.3659,
"train_samples_per_second": 51.231,
"train_steps_per_second": 0.8
}
],
"logging_steps": 10,
"max_steps": 515,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.15348914538104e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
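
A minimal sketch (not part of the original file), assuming the JSON above is saved locally as trainer_state.json and that matplotlib is available: it reads "log_history" and plots the logged training loss and learning-rate schedule per step. The filename and the plotting library are illustrative choices, not something specified by this file.

import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step log entries; the final summary entry
# (train_runtime, total_flos, ...) has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()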