ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401455283164978,
"min": 1.401455283164978,
"max": 1.4277291297912598,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71360.703125,
"min": 68641.4921875,
"max": 77368.3984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.92521739130434,
"min": 76.24884080370943,
"max": 413.59504132231405,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49407.0,
"min": 48698.0,
"max": 50112.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49828.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49828.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4142558574676514,
"min": 0.03830129653215408,
"max": 2.4650509357452393,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1388.1971435546875,
"min": 4.596155643463135,
"max": 1564.52294921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7979303968471028,
"min": 1.8002182627717653,
"max": 3.9967357784409985,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2183.809978187084,
"min": 216.02619153261185,
"max": 2512.215627491474,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7979303968471028,
"min": 1.8002182627717653,
"max": 3.9967357784409985,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2183.809978187084,
"min": 216.02619153261185,
"max": 2512.215627491474,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016067106476111804,
"min": 0.013872415076669616,
"max": 0.01994042851923344,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048201319428335415,
"min": 0.027744830153339232,
"max": 0.05382941369898617,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055282579527960875,
"min": 0.023213793399433295,
"max": 0.060492293909192085,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16584773858388263,
"min": 0.04642758679886659,
"max": 0.17369761193792027,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.612048796016663e-06,
"min": 3.612048796016663e-06,
"max": 0.00029527155157615,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.083614638804999e-05,
"min": 1.083614638804999e-05,
"max": 0.00084372736875755,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120398333333334,
"min": 0.10120398333333334,
"max": 0.19842385000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30361195,
"min": 0.20756695,
"max": 0.5812424500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.007876833333329e-05,
"min": 7.007876833333329e-05,
"max": 0.0049213501150000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021023630499999988,
"min": 0.00021023630499999988,
"max": 0.014063998255,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679273054",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679275355"
},
"total": 2301.256727475,
"count": 1,
"self": 0.4404230000000098,
"children": {
"run_training.setup": {
"total": 0.10184205300004123,
"count": 1,
"self": 0.10184205300004123
},
"TrainerController.start_learning": {
"total": 2300.714462422,
"count": 1,
"self": 4.1486054960150796,
"children": {
"TrainerController._reset_env": {
"total": 9.028095510000014,
"count": 1,
"self": 9.028095510000014
},
"TrainerController.advance": {
"total": 2287.413577807985,
"count": 232927,
"self": 4.431928242961931,
"children": {
"env_step": {
"total": 1782.4409763830367,
"count": 232927,
"self": 1505.3782557980542,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.3113776880025,
"count": 232927,
"self": 16.012020883023297,
"children": {
"TorchPolicy.evaluate": {
"total": 258.2993568049792,
"count": 223010,
"self": 258.2993568049792
}
}
},
"workers": {
"total": 2.751342896980134,
"count": 232927,
"self": 0.0,
"children": {
"worker_root": {
"total": 2292.988215635043,
"count": 232927,
"is_parallel": true,
"self": 1069.992337013039,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009708680000244385,
"count": 1,
"is_parallel": true,
"self": 0.00024432800000795396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007265400000164846,
"count": 2,
"is_parallel": true,
"self": 0.0007265400000164846
}
}
},
"UnityEnvironment.step": {
"total": 0.040814267999962794,
"count": 1,
"is_parallel": true,
"self": 0.00029623399996125954,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001932799999622148,
"count": 1,
"is_parallel": true,
"self": 0.0001932799999622148
},
"communicator.exchange": {
"total": 0.039683109000009154,
"count": 1,
"is_parallel": true,
"self": 0.039683109000009154
},
"steps_from_proto": {
"total": 0.0006416450000301666,
"count": 1,
"is_parallel": true,
"self": 0.00019613500001014472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044551000002002183,
"count": 2,
"is_parallel": true,
"self": 0.00044551000002002183
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1222.995878622004,
"count": 232926,
"is_parallel": true,
"self": 37.3250682368689,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.84585454105331,
"count": 232926,
"is_parallel": true,
"self": 75.84585454105331
},
"communicator.exchange": {
"total": 1022.8719801690434,
"count": 232926,
"is_parallel": true,
"self": 1022.8719801690434
},
"steps_from_proto": {
"total": 86.9529756750386,
"count": 232926,
"is_parallel": true,
"self": 32.53670317205393,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.41627250298467,
"count": 465852,
"is_parallel": true,
"self": 54.41627250298467
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 500.5406731819861,
"count": 232927,
"self": 6.738287842982743,
"children": {
"process_trajectory": {
"total": 141.79717609500358,
"count": 232927,
"self": 140.45874969100373,
"children": {
"RLTrainer._checkpoint": {
"total": 1.338426403999847,
"count": 10,
"self": 1.338426403999847
}
}
},
"_update_policy": {
"total": 352.0052092439998,
"count": 97,
"self": 294.3264176600115,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.6787915839883,
"count": 2910,
"self": 57.6787915839883
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.420000424142927e-07,
"count": 1,
"self": 9.420000424142927e-07
},
"TrainerController._save_models": {
"total": 0.12418266599979688,
"count": 1,
"self": 0.0020996799999011273,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12208298599989575,
"count": 1,
"self": 0.12208298599989575
}
}
}
}
}
}
}