{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7095866203308105,
"min": 1.6346657276153564,
"max": 3.295724630355835,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32331.703125,
"min": 15886.505859375,
"max": 106935.8828125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.1891891891892,
"min": 42.51724137931034,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20184.0,
"min": 4064.0,
"max": 31672.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1624.4653781669724,
"min": 1200.7492804441454,
"max": 1674.2392811020206,
"count": 995
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 240420.87596871192,
"min": 2401.4985608882907,
"max": 350677.0976863614,
"count": 995
},
"SoccerTwos.Step.mean": {
"value": 9999754.0,
"min": 9852.0,
"max": 9999754.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999754.0,
"min": 9852.0,
"max": 9999754.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06674957275390625,
"min": -0.14292249083518982,
"max": 0.23127475380897522,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -9.812187194824219,
"min": -23.620338439941406,
"max": 20.090007781982422,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06506666541099548,
"min": -0.14639608561992645,
"max": 0.2365960031747818,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -9.564800262451172,
"min": -24.799217224121094,
"max": 19.72650718688965,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.16637551135757342,
"min": -0.8636363636363636,
"max": 0.526499998709187,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -24.457200169563293,
"min": -61.554800271987915,
"max": 48.464399933815,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.16637551135757342,
"min": -0.8636363636363636,
"max": 0.526499998709187,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -24.457200169563293,
"min": -61.554800271987915,
"max": 48.464399933815,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017985822345751027,
"min": 0.009780069771901859,
"max": 0.023544317518826574,
"count": 483
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017985822345751027,
"min": 0.009780069771901859,
"max": 0.023544317518826574,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11273043851057689,
"min": 0.0008145160813000984,
"max": 0.12089608038465181,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11273043851057689,
"min": 0.0008145160813000984,
"max": 0.12089608038465181,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1146109846731027,
"min": 0.0008326663853949867,
"max": 0.12377538258830706,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1146109846731027,
"min": 0.0008326663853949867,
"max": 0.12377538258830706,
"count": 483
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00025000000000000006,
"min": 0.00025,
"max": 0.00025000000000000006,
"count": 483
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00025000000000000006,
"min": 0.00025,
"max": 0.00025000000000000006,
"count": 483
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 483
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 483
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675735224",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Zack_\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675758911"
},
"total": 23686.7128649,
"count": 1,
"self": 0.6179039000016928,
"children": {
"run_training.setup": {
"total": 0.07541319999999985,
"count": 1,
"self": 0.07541319999999985
},
"TrainerController.start_learning": {
"total": 23686.0195478,
"count": 1,
"self": 13.615211899650603,
"children": {
"TrainerController._reset_env": {
"total": 3.2718694999986644,
"count": 67,
"self": 3.2718694999986644
},
"TrainerController.advance": {
"total": 23669.02731240035,
"count": 688115,
"self": 13.890435500514286,
"children": {
"env_step": {
"total": 9356.65186970014,
"count": 688115,
"self": 7013.77302250041,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2334.1941401001513,
"count": 688115,
"self": 74.07462089829278,
"children": {
"TorchPolicy.evaluate": {
"total": 2260.1195192018586,
"count": 1261072,
"self": 2260.1195192018586
}
}
},
"workers": {
"total": 8.684707099578374,
"count": 688115,
"self": 0.0,
"children": {
"worker_root": {
"total": 23666.057224099914,
"count": 688115,
"is_parallel": true,
"self": 18082.02282740076,
"children": {
"steps_from_proto": {
"total": 0.0895183000089328,
"count": 134,
"is_parallel": true,
"self": 0.018743700028351018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07077459998058178,
"count": 536,
"is_parallel": true,
"self": 0.07077459998058178
}
}
},
"UnityEnvironment.step": {
"total": 5583.944878399146,
"count": 688115,
"is_parallel": true,
"self": 268.0010310008265,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 235.70050469914983,
"count": 688115,
"is_parallel": true,
"self": 235.70050469914983
},
"communicator.exchange": {
"total": 4226.47750759974,
"count": 688115,
"is_parallel": true,
"self": 4226.47750759974
},
"steps_from_proto": {
"total": 853.7658350994291,
"count": 1376230,
"is_parallel": true,
"self": 181.8007143992943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 671.9651207001348,
"count": 5504920,
"is_parallel": true,
"self": 671.9651207001348
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 14298.485007199695,
"count": 688115,
"self": 101.13273030048549,
"children": {
"process_trajectory": {
"total": 2334.821303999199,
"count": 688115,
"self": 2332.6081465991847,
"children": {
"RLTrainer._checkpoint": {
"total": 2.2131574000143246,
"count": 20,
"self": 2.2131574000143246
}
}
},
"_update_policy": {
"total": 11862.530972900011,
"count": 483,
"self": 1247.5723433000276,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10614.958629599983,
"count": 14502,
"self": 10614.958629599983
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999980541877449e-07,
"count": 1,
"self": 6.999980541877449e-07
},
"TrainerController._save_models": {
"total": 0.10515329999907408,
"count": 1,
"self": 0.0050449000009393785,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1001083999981347,
"count": 1,
"self": 0.1001083999981347
}
}
}
}
}
}
}