{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6115803122520447,
"min": 0.6094377040863037,
"max": 1.4565856456756592,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18376.765625,
"min": 18185.62109375,
"max": 44186.98046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989940.0,
"min": 29952.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989940.0,
"min": 29952.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2192930281162262,
"min": -0.09305952489376068,
"max": 0.2192930281162262,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 56.139015197753906,
"min": -22.427345275878906,
"max": 56.139015197753906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.08302832394838333,
"min": -0.08302832394838333,
"max": 0.662807285785675,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -21.255250930786133,
"min": -21.255250930786133,
"max": 157.0853271484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06805212741297198,
"min": 0.06483253724299562,
"max": 0.07381875635881606,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9527297837816077,
"min": 0.5144852739425935,
"max": 1.0490281150753922,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017257428631072026,
"min": 0.0003259546251754348,
"max": 0.017257428631072026,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24160400083500838,
"min": 0.002933591626578913,
"max": 0.24160400083500838,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.637004597221431e-06,
"min": 7.637004597221431e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010691806436110002,
"min": 0.00010691806436110002,
"max": 0.0032547161150946998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254563571428574,
"min": 0.10254563571428574,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356389000000003,
"min": 1.3886848,
"max": 2.4423938,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026430900785714295,
"min": 0.00026430900785714295,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003700326110000001,
"min": 0.003700326110000001,
"max": 0.10851203947000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01597723551094532,
"min": 0.01597723551094532,
"max": 0.6487232446670532,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22368130087852478,
"min": 0.22368130087852478,
"max": 4.541062831878662,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 678.7045454545455,
"min": 589.4509803921569,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29863.0,
"min": 15984.0,
"max": 33910.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0030090587044305,
"min": -1.0000000521540642,
"max": 1.0030090587044305,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 44.13239858299494,
"min": -31.998401656746864,
"max": 45.929598830640316,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0030090587044305,
"min": -1.0000000521540642,
"max": 1.0030090587044305,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 44.13239858299494,
"min": -31.998401656746864,
"max": 45.929598830640316,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.11248435904483566,
"min": 0.10194601255668954,
"max": 13.45548995770514,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.9493117979727685,
"min": 4.559003848757129,
"max": 215.28783932328224,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689803019",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689805113"
},
"total": 2093.704646631,
"count": 1,
"self": 0.4839880209997318,
"children": {
"run_training.setup": {
"total": 0.048760985000171786,
"count": 1,
"self": 0.048760985000171786
},
"TrainerController.start_learning": {
"total": 2093.171897625,
"count": 1,
"self": 1.3180764618987268,
"children": {
"TrainerController._reset_env": {
"total": 5.3561736360002214,
"count": 1,
"self": 5.3561736360002214
},
"TrainerController.advance": {
"total": 2086.4049171281017,
"count": 63331,
"self": 1.3437824670663758,
"children": {
"env_step": {
"total": 1424.053160464035,
"count": 63331,
"self": 1319.0801552350576,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.16805083692452,
"count": 63331,
"self": 4.672769805818007,
"children": {
"TorchPolicy.evaluate": {
"total": 99.49528103110652,
"count": 62563,
"self": 99.49528103110652
}
}
},
"workers": {
"total": 0.8049543920528777,
"count": 63331,
"self": 0.0,
"children": {
"worker_root": {
"total": 2088.749662699087,
"count": 63331,
"is_parallel": true,
"self": 879.8682464541143,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002594572999896627,
"count": 1,
"is_parallel": true,
"self": 0.0007228780000332335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018716949998633936,
"count": 8,
"is_parallel": true,
"self": 0.0018716949998633936
}
}
},
"UnityEnvironment.step": {
"total": 0.050544313000045804,
"count": 1,
"is_parallel": true,
"self": 0.0005786550004813762,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000497584999720857,
"count": 1,
"is_parallel": true,
"self": 0.000497584999720857
},
"communicator.exchange": {
"total": 0.047675266999704036,
"count": 1,
"is_parallel": true,
"self": 0.047675266999704036
},
"steps_from_proto": {
"total": 0.0017928060001395352,
"count": 1,
"is_parallel": true,
"self": 0.00036497400014923187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014278319999903033,
"count": 8,
"is_parallel": true,
"self": 0.0014278319999903033
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1208.8814162449726,
"count": 63330,
"is_parallel": true,
"self": 33.70078335995504,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.386825297926407,
"count": 63330,
"is_parallel": true,
"self": 22.386825297926407
},
"communicator.exchange": {
"total": 1053.424484299027,
"count": 63330,
"is_parallel": true,
"self": 1053.424484299027
},
"steps_from_proto": {
"total": 99.36932328806415,
"count": 63330,
"is_parallel": true,
"self": 19.913307017788156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.456016270276,
"count": 506640,
"is_parallel": true,
"self": 79.456016270276
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 661.0079741970003,
"count": 63331,
"self": 2.513784414975362,
"children": {
"process_trajectory": {
"total": 108.45604083902708,
"count": 63331,
"self": 108.25097340402681,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2050674350002737,
"count": 2,
"self": 0.2050674350002737
}
}
},
"_update_policy": {
"total": 550.0381489429979,
"count": 446,
"self": 362.0209358440143,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.01721309898358,
"count": 22785,
"self": 188.01721309898358
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0499998097657226e-06,
"count": 1,
"self": 1.0499998097657226e-06
},
"TrainerController._save_models": {
"total": 0.0927293489994554,
"count": 1,
"self": 0.0013048450000496814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09142450399940572,
"count": 1,
"self": 0.09142450399940572
}
}
}
}
}
}
}