{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1999590396881104,
"min": 3.1999006271362305,
"max": 3.241605281829834,
"count": 37
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 66866.34375,
"min": 29757.73046875,
"max": 103708.4140625,
"count": 37
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 980.2,
"min": 354.26666666666665,
"max": 999.0,
"count": 37
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19604.0,
"min": 8220.0,
"max": 28456.0,
"count": 37
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1211.9378862333965,
"min": 1205.1835371958437,
"max": 1215.6735285543373,
"count": 35
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2423.875772466793,
"min": 2412.7680449870254,
"max": 26723.470354880505,
"count": 35
},
"SoccerTwos.Step.mean": {
"value": 699154.0,
"min": 339906.0,
"max": 699154.0,
"count": 37
},
"SoccerTwos.Step.sum": {
"value": 699154.0,
"min": 339906.0,
"max": 699154.0,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.004377217032015324,
"min": -0.011403935961425304,
"max": 0.00670193787664175,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.043772172182798386,
"min": -0.21572968363761902,
"max": 0.09587319940328598,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0028003088664263487,
"min": -0.013163342140614986,
"max": 0.006617785431444645,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.028003089129924774,
"min": -0.23477448523044586,
"max": 0.09730030596256256,
"count": 37
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 37
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.2,
"min": -0.5555200020472209,
"max": 0.3996470535502714,
"count": 37
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -9.0,
"max": 6.793999910354614,
"count": 37
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.2,
"min": -0.5555200020472209,
"max": 0.3996470535502714,
"count": 37
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -9.0,
"max": 6.793999910354614,
"count": 37
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0159091107120427,
"min": 0.012636019413669904,
"max": 0.020098431833321228,
"count": 17
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0159091107120427,
"min": 0.012636019413669904,
"max": 0.020098431833321228,
"count": 17
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.002516983779302488,
"min": 0.00014130498408727968,
"max": 0.0086365083232522,
"count": 17
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.002516983779302488,
"min": 0.00014130498408727968,
"max": 0.0086365083232522,
"count": 17
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002624650854462137,
"min": 0.00014110098109085812,
"max": 0.008632027031853794,
"count": 17
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002624650854462137,
"min": 0.00014110098109085812,
"max": 0.008632027031853794,
"count": 17
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 17
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 17
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 17
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 17
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 17
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.010000000000000002,
"min": 0.010000000000000002,
"max": 0.010000000000000002,
"count": 17
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683407536",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/andreas.bjarlestam/mambaforge/envs/huggingface-rl-course/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1683409217"
},
"total": 1680.865004125,
"count": 1,
"self": 0.12260970800002724,
"children": {
"run_training.setup": {
"total": 0.013448125000000033,
"count": 1,
"self": 0.013448125000000033
},
"TrainerController.start_learning": {
"total": 1680.728946292,
"count": 1,
"self": 0.2918064929965567,
"children": {
"TrainerController._reset_env": {
"total": 1.733552874999984,
"count": 3,
"self": 1.733552874999984
},
"TrainerController.advance": {
"total": 1678.5335023400037,
"count": 24247,
"self": 0.28358365201415836,
"children": {
"env_step": {
"total": 1387.8335258989998,
"count": 24247,
"self": 1339.7047586980023,
"children": {
"SubprocessEnvManager._take_step": {
"total": 47.923312614988625,
"count": 24247,
"self": 1.4153185770023171,
"children": {
"TorchPolicy.evaluate": {
"total": 46.50799403798631,
"count": 48068,
"self": 46.50799403798631
}
}
},
"workers": {
"total": 0.20545458600886946,
"count": 24246,
"self": 0.0,
"children": {
"worker_root": {
"total": 1678.4846704740066,
"count": 24246,
"is_parallel": true,
"self": 380.76107760500213,
"children": {
"steps_from_proto": {
"total": 0.005257666000014982,
"count": 6,
"is_parallel": true,
"self": 0.0006387100000131429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004618956000001839,
"count": 24,
"is_parallel": true,
"self": 0.004618956000001839
}
}
},
"UnityEnvironment.step": {
"total": 1297.7183352030045,
"count": 24246,
"is_parallel": true,
"self": 3.2725019259994497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.31964003499131,
"count": 24246,
"is_parallel": true,
"self": 22.31964003499131
},
"communicator.exchange": {
"total": 1224.7218267430092,
"count": 24246,
"is_parallel": true,
"self": 1224.7218267430092
},
"steps_from_proto": {
"total": 47.40436649900448,
"count": 48492,
"is_parallel": true,
"self": 5.1485635459650325,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.25580295303945,
"count": 193968,
"is_parallel": true,
"self": 42.25580295303945
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 290.41639278898964,
"count": 24246,
"self": 2.2689823499720774,
"children": {
"process_trajectory": {
"total": 40.91866668901776,
"count": 24246,
"self": 40.91866668901776
},
"_update_policy": {
"total": 247.2287437499998,
"count": 17,
"self": 32.059548280997035,
"children": {
"TorchPOCAOptimizer.update": {
"total": 215.16919546900277,
"count": 510,
"self": 215.16919546900277
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.839999630552484e-07,
"count": 1,
"self": 5.839999630552484e-07
},
"TrainerController._save_models": {
"total": 0.17008399999986068,
"count": 1,
"self": 0.001209707999805687,
"children": {
"RLTrainer._checkpoint": {
"total": 0.168874292000055,
"count": 1,
"self": 0.168874292000055
}
}
}
}
}
}
}