{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4041372537612915,
"min": 1.40413498878479,
"max": 1.4252727031707764,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70477.859375,
"min": 67443.578125,
"max": 77035.375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 111.17792792792793,
"min": 101.93139293139293,
"max": 435.4,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49363.0,
"min": 48953.0,
"max": 50275.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49436.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49436.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3473093509674072,
"min": -0.04926830902695656,
"max": 2.3473093509674072,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1042.205322265625,
"min": -5.616587162017822,
"max": 1089.499755859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6829647063403517,
"min": 1.7630482463721644,
"max": 3.892506307004446,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1635.2363296151161,
"min": 200.98750008642673,
"max": 1692.9491168260574,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6829647063403517,
"min": 1.7630482463721644,
"max": 3.892506307004446,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1635.2363296151161,
"min": 200.98750008642673,
"max": 1692.9491168260574,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019050138829819235,
"min": 0.013711894334604343,
"max": 0.019050138829819235,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03810027765963847,
"min": 0.027423788669208686,
"max": 0.056025167550736416,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04585328989972671,
"min": 0.022759516475101313,
"max": 0.05633031943192085,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09170657979945342,
"min": 0.045519032950202626,
"max": 0.16696945975224176,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.433873522075005e-06,
"min": 4.433873522075005e-06,
"max": 0.0002953236015588,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.86774704415001e-06,
"min": 8.86774704415001e-06,
"max": 0.0008439726186758001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10147792500000001,
"min": 0.10147792500000001,
"max": 0.1984412,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20295585000000002,
"min": 0.20295585000000002,
"max": 0.5813241999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.374845750000012e-05,
"min": 8.374845750000012e-05,
"max": 0.0049222158800000014,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016749691500000024,
"min": 0.00016749691500000024,
"max": 0.014068077580000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678922760",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678925212"
},
"total": 2451.780229306,
"count": 1,
"self": 0.3885880980001275,
"children": {
"run_training.setup": {
"total": 0.1188868510000134,
"count": 1,
"self": 0.1188868510000134
},
"TrainerController.start_learning": {
"total": 2451.272754357,
"count": 1,
"self": 4.46860673304127,
"children": {
"TrainerController._reset_env": {
"total": 9.192531967000008,
"count": 1,
"self": 9.192531967000008
},
"TrainerController.advance": {
"total": 2437.491365653959,
"count": 230913,
"self": 4.83964995702263,
"children": {
"env_step": {
"total": 1906.4031166359316,
"count": 230913,
"self": 1607.8986496128823,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.44157585206597,
"count": 230913,
"self": 17.45589276903604,
"children": {
"TorchPolicy.evaluate": {
"total": 277.9856830830299,
"count": 222897,
"self": 277.9856830830299
}
}
},
"workers": {
"total": 3.062891170983278,
"count": 230913,
"self": 0.0,
"children": {
"worker_root": {
"total": 2442.957639986973,
"count": 230913,
"is_parallel": true,
"self": 1134.0454963640118,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009934320000013486,
"count": 1,
"is_parallel": true,
"self": 0.0002621779999856244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007312540000157242,
"count": 2,
"is_parallel": true,
"self": 0.0007312540000157242
}
}
},
"UnityEnvironment.step": {
"total": 0.0696399999999926,
"count": 1,
"is_parallel": true,
"self": 0.00034410299997489346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003803600000082952,
"count": 1,
"is_parallel": true,
"self": 0.0003803600000082952
},
"communicator.exchange": {
"total": 0.06819756900000584,
"count": 1,
"is_parallel": true,
"self": 0.06819756900000584
},
"steps_from_proto": {
"total": 0.000717968000003566,
"count": 1,
"is_parallel": true,
"self": 0.00019705100001488063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005209169999886853,
"count": 2,
"is_parallel": true,
"self": 0.0005209169999886853
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1308.9121436229611,
"count": 230912,
"is_parallel": true,
"self": 39.63001980686863,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.12516590996461,
"count": 230912,
"is_parallel": true,
"self": 79.12516590996461
},
"communicator.exchange": {
"total": 1098.9656550750578,
"count": 230912,
"is_parallel": true,
"self": 1098.9656550750578
},
"steps_from_proto": {
"total": 91.19130283107015,
"count": 230912,
"is_parallel": true,
"self": 33.718578399068036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.47272443200211,
"count": 461824,
"is_parallel": true,
"self": 57.47272443200211
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 526.2485990610045,
"count": 230913,
"self": 7.2637495620875825,
"children": {
"process_trajectory": {
"total": 142.26541541891643,
"count": 230913,
"self": 140.9992591899165,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2661562289999324,
"count": 10,
"self": 1.2661562289999324
}
}
},
"_update_policy": {
"total": 376.71943408000044,
"count": 96,
"self": 316.97932662200117,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.74010745799927,
"count": 2880,
"self": 59.74010745799927
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.247000000148546e-06,
"count": 1,
"self": 1.247000000148546e-06
},
"TrainerController._save_models": {
"total": 0.12024875599990992,
"count": 1,
"self": 0.002235479999853851,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11801327600005607,
"count": 1,
"self": 0.11801327600005607
}
}
}
}
}
}
}