{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8434988260269165,
"min": 1.8346599340438843,
"max": 3.2956979274749756,
"count": 862
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36928.96875,
"min": 4740.29248046875,
"max": 150987.234375,
"count": 862
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 47.98,
"min": 43.309734513274336,
"max": 999.0,
"count": 862
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19192.0,
"min": 11996.0,
"max": 28644.0,
"count": 862
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1566.773003609935,
"min": 1173.1243076122191,
"max": 1582.0444014461386,
"count": 797
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313354.60072198696,
"min": 2348.198127248749,
"max": 334040.74287821946,
"count": 797
},
"SoccerTwos.Step.mean": {
"value": 8619858.0,
"min": 9342.0,
"max": 8619858.0,
"count": 862
},
"SoccerTwos.Step.sum": {
"value": 8619858.0,
"min": 9342.0,
"max": 8619858.0,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.018008800223469734,
"min": -0.12056028097867966,
"max": 0.22568106651306152,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.601759910583496,
"min": -19.651325225830078,
"max": 34.01498031616211,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.016617776826024055,
"min": -0.1251881718635559,
"max": 0.22502605617046356,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 3.3235554695129395,
"min": -20.405672073364258,
"max": 35.035430908203125,
"count": 862
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 862
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.05361400067806244,
"min": -0.6412761892591204,
"max": 0.4861466586589813,
"count": 862
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 10.722800135612488,
"min": -67.88499993085861,
"max": 74.02039980888367,
"count": 862
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.05361400067806244,
"min": -0.6412761892591204,
"max": 0.4861466586589813,
"count": 862
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 10.722800135612488,
"min": -67.88499993085861,
"max": 74.02039980888367,
"count": 862
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 862
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 862
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013608232833697305,
"min": 0.010440699631969134,
"max": 0.023799832879255214,
"count": 409
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013608232833697305,
"min": 0.010440699631969134,
"max": 0.023799832879255214,
"count": 409
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10985415776570638,
"min": 1.2019552552070914e-06,
"max": 0.11892705634236336,
"count": 409
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10985415776570638,
"min": 1.2019552552070914e-06,
"max": 0.11892705634236336,
"count": 409
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11099382241566975,
"min": 1.1897383368856633e-06,
"max": 0.12036320293943087,
"count": 409
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11099382241566975,
"min": 1.1897383368856633e-06,
"max": 0.12036320293943087,
"count": 409
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 409
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 409
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 409
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 409
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 409
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 409
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685874103",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/home/user/.virtualenvs/rl-course-unit7/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685891765"
},
"total": 17661.997806900006,
"count": 1,
"self": 0.3408695000252919,
"children": {
"run_training.setup": {
"total": 0.009274399999412708,
"count": 1,
"self": 0.009274399999412708
},
"TrainerController.start_learning": {
"total": 17661.64766299998,
"count": 1,
"self": 13.799717194051482,
"children": {
"TrainerController._reset_env": {
"total": 30.533092100100475,
"count": 43,
"self": 30.533092100100475
},
"TrainerController.advance": {
"total": 17616.84690400584,
"count": 579121,
"self": 13.594753412296996,
"children": {
"env_step": {
"total": 13016.45070159607,
"count": 579121,
"self": 7400.9529640890105,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5607.194474806223,
"count": 579121,
"self": 92.68533239483077,
"children": {
"TorchPolicy.evaluate": {
"total": 5514.5091424113925,
"count": 1095438,
"self": 5514.5091424113925
}
}
},
"workers": {
"total": 8.303262700836058,
"count": 579121,
"self": 0.0,
"children": {
"worker_root": {
"total": 17632.79518169303,
"count": 579121,
"is_parallel": true,
"self": 11662.593824684722,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.013703799995710142,
"count": 2,
"is_parallel": true,
"self": 0.00627020001411438,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007433599981595762,
"count": 8,
"is_parallel": true,
"self": 0.007433599981595762
}
}
},
"UnityEnvironment.step": {
"total": 0.19131019999622367,
"count": 1,
"is_parallel": true,
"self": 0.001357399974949658,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.04366450000088662,
"count": 1,
"is_parallel": true,
"self": 0.04366450000088662
},
"communicator.exchange": {
"total": 0.14161740000417922,
"count": 1,
"is_parallel": true,
"self": 0.14161740000417922
},
"steps_from_proto": {
"total": 0.004670900016208179,
"count": 2,
"is_parallel": true,
"self": 0.0012376000377116725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003433299978496507,
"count": 8,
"is_parallel": true,
"self": 0.003433299978496507
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5970.13942190817,
"count": 579120,
"is_parallel": true,
"self": 295.2301604988461,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 208.11454050992324,
"count": 579120,
"is_parallel": true,
"self": 208.11454050992324
},
"communicator.exchange": {
"total": 4580.229337689278,
"count": 579120,
"is_parallel": true,
"self": 4580.229337689278
},
"steps_from_proto": {
"total": 886.5653832101234,
"count": 1158240,
"is_parallel": true,
"self": 185.6499321908923,
"children": {
"_process_rank_one_or_two_observation": {
"total": 700.9154510192311,
"count": 4632960,
"is_parallel": true,
"self": 700.9154510192311
}
}
}
}
},
"steps_from_proto": {
"total": 0.06193510013690684,
"count": 84,
"is_parallel": true,
"self": 0.013425699682557024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.048509400454349816,
"count": 336,
"is_parallel": true,
"self": 0.048509400454349816
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4586.801448997474,
"count": 579121,
"self": 92.61359740051557,
"children": {
"process_trajectory": {
"total": 2136.028718097048,
"count": 579121,
"self": 2128.789415196996,
"children": {
"RLTrainer._checkpoint": {
"total": 7.23930290005228,
"count": 17,
"self": 7.23930290005228
}
}
},
"_update_policy": {
"total": 2358.1591334999102,
"count": 409,
"self": 1090.5400891008903,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1267.61904439902,
"count": 12279,
"self": 1267.61904439902
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.200009137392044e-06,
"count": 1,
"self": 1.200009137392044e-06
},
"TrainerController._save_models": {
"total": 0.46794849997968413,
"count": 1,
"self": 0.013114499975927174,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45483400000375696,
"count": 1,
"self": 0.45483400000375696
}
}
}
}
}
}
}