{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.5318515300750732,
"min": 2.5223515033721924,
"max": 2.5813586711883545,
"count": 18
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 49259.703125,
"min": 47095.140625,
"max": 53957.95703125,
"count": 18
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.67816091954023,
"min": 54.18681318681319,
"max": 76.01538461538462,
"count": 18
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19724.0,
"min": 19148.0,
"max": 20264.0,
"count": 18
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1679.7317393981489,
"min": 1656.1921875095022,
"max": 1679.7317393981489,
"count": 18
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 292273.3226552779,
"min": 215649.28115044435,
"max": 305145.5088457796,
"count": 18
},
"SoccerTwos.Step.mean": {
"value": 7219992.0,
"min": 7049887.0,
"max": 7219992.0,
"count": 18
},
"SoccerTwos.Step.sum": {
"value": 7219992.0,
"min": 7049887.0,
"max": 7219992.0,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.008738174103200436,
"min": -0.07982506603002548,
"max": 0.008738174103200436,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.520442247390747,
"min": -12.13340950012207,
"max": 1.520442247390747,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.009255587123334408,
"min": -0.0813833549618721,
"max": 0.009255587123334408,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.6104722023010254,
"min": -12.370269775390625,
"max": 1.6104722023010254,
"count": 18
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 18
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1608482739706149,
"min": -0.20066532969474793,
"max": 0.1608482739706149,
"count": 18
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 27.987599670886993,
"min": -33.53679966926575,
"max": 27.987599670886993,
"count": 18
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1608482739706149,
"min": -0.20066532969474793,
"max": 0.1608482739706149,
"count": 18
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 27.987599670886993,
"min": -33.53679966926575,
"max": 27.987599670886993,
"count": 18
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016063779070585347,
"min": 0.015077678242232651,
"max": 0.02123676067761456,
"count": 8
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016063779070585347,
"min": 0.015077678242232651,
"max": 0.02123676067761456,
"count": 8
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06546799217661221,
"min": 0.06479480353494485,
"max": 0.06966093542675177,
"count": 8
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06546799217661221,
"min": 0.06479480353494485,
"max": 0.06966093542675177,
"count": 8
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06590312644839287,
"min": 0.0653505644450585,
"max": 0.07026846731702487,
"count": 8
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06590312644839287,
"min": 0.0653505644450585,
"max": 0.07026846731702487,
"count": 8
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00019999999999999996,
"min": 0.00019999999999999996,
"max": 0.00019999999999999996,
"count": 8
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00019999999999999996,
"min": 0.00019999999999999996,
"max": 0.00019999999999999996,
"count": 8
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 8
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 8
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 8
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 8
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683492227",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/andreas.bjarlestam/mambaforge/envs/huggingface-rl-course/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1683493071"
},
"total": 843.9403655000001,
"count": 1,
"self": 0.14983404100019015,
"children": {
"run_training.setup": {
"total": 0.023452833999999978,
"count": 1,
"self": 0.023452833999999978
},
"TrainerController.start_learning": {
"total": 843.767078625,
"count": 1,
"self": 0.15948642700573146,
"children": {
"TrainerController._reset_env": {
"total": 1.5302018339999996,
"count": 2,
"self": 1.5302018339999996
},
"TrainerController.advance": {
"total": 841.9261699059942,
"count": 13040,
"self": 0.13282511599504687,
"children": {
"env_step": {
"total": 681.1283580699985,
"count": 13040,
"self": 657.7465356490007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 23.280398930000096,
"count": 13040,
"self": 0.6736503810056327,
"children": {
"TorchPolicy.evaluate": {
"total": 22.606748548994464,
"count": 23622,
"self": 22.606748548994464
}
}
},
"workers": {
"total": 0.10142349099767767,
"count": 13039,
"self": 0.0,
"children": {
"worker_root": {
"total": 841.8678021200012,
"count": 13039,
"is_parallel": true,
"self": 202.98945657000388,
"children": {
"steps_from_proto": {
"total": 0.0034942079999997766,
"count": 4,
"is_parallel": true,
"self": 0.0004312480000001617,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003062959999999615,
"count": 16,
"is_parallel": true,
"self": 0.003062959999999615
}
}
},
"UnityEnvironment.step": {
"total": 638.8748513419973,
"count": 13039,
"is_parallel": true,
"self": 1.6371326879955177,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.2325981490038,
"count": 13039,
"is_parallel": true,
"self": 11.2325981490038
},
"communicator.exchange": {
"total": 602.5734008570001,
"count": 13039,
"is_parallel": true,
"self": 602.5734008570001
},
"steps_from_proto": {
"total": 23.431719647997838,
"count": 26078,
"is_parallel": true,
"self": 2.6071703399854265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 20.82454930801241,
"count": 104312,
"is_parallel": true,
"self": 20.82454930801241
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 160.6649867200007,
"count": 13039,
"self": 1.0906056580058987,
"children": {
"process_trajectory": {
"total": 32.77911264699492,
"count": 13039,
"self": 32.77911264699492
},
"_update_policy": {
"total": 126.79526841499991,
"count": 9,
"self": 15.891884914000315,
"children": {
"TorchPOCAOptimizer.update": {
"total": 110.9033835009996,
"count": 270,
"self": 110.9033835009996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.24999984211172e-07,
"count": 1,
"self": 6.24999984211172e-07
},
"TrainerController._save_models": {
"total": 0.1512198330000274,
"count": 1,
"self": 0.0007593750000296495,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15046045799999774,
"count": 1,
"self": 0.15046045799999774
}
}
}
}
}
}
}