{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6265735626220703,
"min": 1.5484353303909302,
"max": 3.2956793308258057,
"count": 1221
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34249.1328125,
"min": 13981.4033203125,
"max": 115104.0546875,
"count": 1221
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.04597701149425,
"min": 42.258620689655174,
"max": 999.0,
"count": 1221
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19504.0,
"min": 11456.0,
"max": 30252.0,
"count": 1221
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1620.0417341444238,
"min": 1196.3466733634127,
"max": 1642.5166234869769,
"count": 1215
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 281887.26174112974,
"min": 2394.535852594916,
"max": 358747.0237853759,
"count": 1215
},
"SoccerTwos.Step.mean": {
"value": 12209946.0,
"min": 9142.0,
"max": 12209946.0,
"count": 1221
},
"SoccerTwos.Step.sum": {
"value": 12209946.0,
"min": 9142.0,
"max": 12209946.0,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.011582380160689354,
"min": -0.12345093488693237,
"max": 0.18368637561798096,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.02691650390625,
"min": -23.949481964111328,
"max": 32.85422134399414,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007369144819676876,
"min": -0.12794145941734314,
"max": 0.18581099808216095,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.2896003723144531,
"min": -24.82064437866211,
"max": 33.63179016113281,
"count": 1221
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1221
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.05773142780576433,
"min": -0.6875,
"max": 0.5502645152230417,
"count": 1221
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 10.102999866008759,
"min": -61.78280007839203,
"max": 68.23279988765717,
"count": 1221
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.05773142780576433,
"min": -0.6875,
"max": 0.5502645152230417,
"count": 1221
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 10.102999866008759,
"min": -61.78280007839203,
"max": 68.23279988765717,
"count": 1221
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1221
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1221
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016578550640163787,
"min": 0.010766705235194724,
"max": 0.02550353014181989,
"count": 590
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016578550640163787,
"min": 0.010766705235194724,
"max": 0.02550353014181989,
"count": 590
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10681834295392037,
"min": 0.0007180829411178517,
"max": 0.12248274932305019,
"count": 590
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10681834295392037,
"min": 0.0007180829411178517,
"max": 0.12248274932305019,
"count": 590
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10834903717041015,
"min": 0.0007234384558008363,
"max": 0.12479151934385299,
"count": 590
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10834903717041015,
"min": 0.0007234384558008363,
"max": 0.12479151934385299,
"count": 590
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 590
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 590
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 590
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 590
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 590
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 590
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677412004",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Cereza\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1677451209"
},
"total": 39204.465535,
"count": 1,
"self": 0.49378710000019055,
"children": {
"run_training.setup": {
"total": 0.07150319999999999,
"count": 1,
"self": 0.07150319999999999
},
"TrainerController.start_learning": {
"total": 39203.9002447,
"count": 1,
"self": 16.627648001129273,
"children": {
"TrainerController._reset_env": {
"total": 7.107990100005867,
"count": 62,
"self": 7.107990100005867
},
"TrainerController.advance": {
"total": 39180.00858479887,
"count": 837499,
"self": 15.177543395766406,
"children": {
"env_step": {
"total": 11952.54620950334,
"count": 837499,
"self": 9220.515913203917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2721.0198986993987,
"count": 837499,
"self": 92.35025679878026,
"children": {
"TorchPolicy.evaluate": {
"total": 2628.6696419006184,
"count": 1537658,
"self": 2628.6696419006184
}
}
},
"workers": {
"total": 11.01039760002432,
"count": 837499,
"self": 0.0,
"children": {
"worker_root": {
"total": 39173.51721440323,
"count": 837499,
"is_parallel": true,
"self": 31786.75880220276,
"children": {
"steps_from_proto": {
"total": 0.10280219999157492,
"count": 124,
"is_parallel": true,
"self": 0.018019800023632015,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0847823999679429,
"count": 496,
"is_parallel": true,
"self": 0.0847823999679429
}
}
},
"UnityEnvironment.step": {
"total": 7386.655610000482,
"count": 837499,
"is_parallel": true,
"self": 425.4859668017907,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 347.03107059961445,
"count": 837499,
"is_parallel": true,
"self": 347.03107059961445
},
"communicator.exchange": {
"total": 5268.140466000155,
"count": 837499,
"is_parallel": true,
"self": 5268.140466000155
},
"steps_from_proto": {
"total": 1345.998106598921,
"count": 1674998,
"is_parallel": true,
"self": 239.50459289770106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1106.4935137012199,
"count": 6699992,
"is_parallel": true,
"self": 1106.4935137012199
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 27212.284831899764,
"count": 837499,
"self": 130.58228550038984,
"children": {
"process_trajectory": {
"total": 4157.984527899309,
"count": 837499,
"self": 4155.143391799306,
"children": {
"RLTrainer._checkpoint": {
"total": 2.8411361000028137,
"count": 24,
"self": 2.8411361000028137
}
}
},
"_update_policy": {
"total": 22923.718018500065,
"count": 590,
"self": 1851.5352701003249,
"children": {
"TorchPOCAOptimizer.update": {
"total": 21072.18274839974,
"count": 17703,
"self": 21072.18274839974
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.999995770864189e-07,
"count": 1,
"self": 8.999995770864189e-07
},
"TrainerController._save_models": {
"total": 0.1560209000017494,
"count": 1,
"self": 0.03218120000383351,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12383969999791589,
"count": 1,
"self": 0.12383969999791589
}
}
}
}
}
}
}