{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.3794636726379395,
"min": 1.2884377241134644,
"max": 3.2957065105438232,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 26971.2734375,
"min": 15410.125,
"max": 126892.8671875,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.13157894736842,
"min": 38.6031746031746,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19800.0,
"min": 3984.0,
"max": 30908.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1742.0551242063318,
"min": 1172.5184660541652,
"max": 1758.9542390081979,
"count": 4912
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 264792.3788793624,
"min": 2366.814687598051,
"max": 437611.8993019908,
"count": 4912
},
"SoccerTwos.Step.mean": {
"value": 49999978.0,
"min": 9796.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999978.0,
"min": 9796.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0077554695308208466,
"min": -0.1597510129213333,
"max": 0.1955847293138504,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.1788313388824463,
"min": -27.95642852783203,
"max": 33.29837417602539,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.006984925363212824,
"min": -0.1580408215522766,
"max": 0.19503583014011383,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.061708688735962,
"min": -27.657142639160156,
"max": 31.99698257446289,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.018971052608991925,
"min": -0.6153846153846154,
"max": 0.38235293416415944,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 2.8835999965667725,
"min": -60.80880010128021,
"max": 66.13839960098267,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.018971052608991925,
"min": -0.6153846153846154,
"max": 0.38235293416415944,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 2.8835999965667725,
"min": -60.80880010128021,
"max": 66.13839960098267,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017043410218320787,
"min": 0.009336015598576827,
"max": 0.02597291834341983,
"count": 2421
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017043410218320787,
"min": 0.009336015598576827,
"max": 0.02597291834341983,
"count": 2421
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10036668106913567,
"min": 1.550573098067313e-07,
"max": 0.12762477546930312,
"count": 2421
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10036668106913567,
"min": 1.550573098067313e-07,
"max": 0.12762477546930312,
"count": 2421
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10176810398697853,
"min": 1.646743659951729e-07,
"max": 0.1294618082543214,
"count": 2421
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10176810398697853,
"min": 1.646743659951729e-07,
"max": 0.1294618082543214,
"count": 2421
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2421
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2421
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2421
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2421
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2421
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2421
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701567309",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/home/dfotland/projects/wsl-reinforce/gymnasium-ml-agents/venv-ml-agents/bin/mlagents-learn ./config/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwosWSL2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701658081"
},
"total": 90771.95874106901,
"count": 1,
"self": 10.0030337300268,
"children": {
"run_training.setup": {
"total": 0.008348964009201154,
"count": 1,
"self": 0.008348964009201154
},
"TrainerController.start_learning": {
"total": 90761.94735837498,
"count": 1,
"self": 43.457351466378896,
"children": {
"TrainerController._reset_env": {
"total": 4.1705112889176235,
"count": 250,
"self": 4.1705112889176235
},
"TrainerController.advance": {
"total": 90714.00739625475,
"count": 3446828,
"self": 44.49552140981541,
"children": {
"env_step": {
"total": 70936.72120019514,
"count": 3446828,
"self": 34428.18149219357,
"children": {
"SubprocessEnvManager._take_step": {
"total": 36478.73816043974,
"count": 3446828,
"self": 269.9233846195275,
"children": {
"TorchPolicy.evaluate": {
"total": 36208.81477582021,
"count": 6286048,
"self": 36208.81477582021
}
}
},
"workers": {
"total": 29.801547561830375,
"count": 3446828,
"self": 0.0,
"children": {
"worker_root": {
"total": 90679.03026842861,
"count": 3446828,
"is_parallel": true,
"self": 61144.455360339896,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014958319952711463,
"count": 2,
"is_parallel": true,
"self": 0.00038458796916529536,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001111244026105851,
"count": 8,
"is_parallel": true,
"self": 0.001111244026105851
}
}
},
"UnityEnvironment.step": {
"total": 0.0154575560009107,
"count": 1,
"is_parallel": true,
"self": 0.00027115203556604683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020585799938999116,
"count": 1,
"is_parallel": true,
"self": 0.00020585799938999116
},
"communicator.exchange": {
"total": 0.014187530003255233,
"count": 1,
"is_parallel": true,
"self": 0.014187530003255233
},
"steps_from_proto": {
"total": 0.0007930159626994282,
"count": 2,
"is_parallel": true,
"self": 0.00016426198999397457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006287539727054536,
"count": 8,
"is_parallel": true,
"self": 0.0006287539727054536
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 29534.345643326495,
"count": 3446827,
"is_parallel": true,
"self": 1003.6647927839076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 644.072898122773,
"count": 3446827,
"is_parallel": true,
"self": 644.072898122773
},
"communicator.exchange": {
"total": 25124.179021216987,
"count": 3446827,
"is_parallel": true,
"self": 25124.179021216987
},
"steps_from_proto": {
"total": 2762.428931202827,
"count": 6893654,
"is_parallel": true,
"self": 560.9788271841826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2201.450104018644,
"count": 27574616,
"is_parallel": true,
"self": 2201.450104018644
}
}
}
}
},
"steps_from_proto": {
"total": 0.22926476222346537,
"count": 498,
"is_parallel": true,
"self": 0.046726839820621535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.18253792240284383,
"count": 1992,
"is_parallel": true,
"self": 0.18253792240284383
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19732.79067464979,
"count": 3446828,
"self": 361.4160051846702,
"children": {
"process_trajectory": {
"total": 12432.33903780859,
"count": 3446828,
"self": 12416.554201690538,
"children": {
"RLTrainer._checkpoint": {
"total": 15.784836118051317,
"count": 100,
"self": 15.784836118051317
}
}
},
"_update_policy": {
"total": 6939.03563165653,
"count": 2421,
"self": 3189.62622921844,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3749.40940243809,
"count": 72639,
"self": 3749.40940243809
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.259636625647545e-07,
"count": 1,
"self": 4.259636625647545e-07
},
"TrainerController._save_models": {
"total": 0.3120989389717579,
"count": 1,
"self": 0.0008117289980873466,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31128720997367054,
"count": 1,
"self": 0.31128720997367054
}
}
}
}
}
}
}