poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4511103630065918,
"min": 1.424573540687561,
"max": 3.295682668685913,
"count": 1753
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 26979.04296875,
"min": 25946.181640625,
"max": 105461.84375,
"count": 1753
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 77.61904761904762,
"min": 40.96638655462185,
"max": 999.0,
"count": 1753
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19560.0,
"min": 8416.0,
"max": 27620.0,
"count": 1753
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1615.503282496499,
"min": 1202.0326333979717,
"max": 1710.7366365702744,
"count": 1752
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 203553.41359455886,
"min": 2405.3037547417584,
"max": 389885.1082996513,
"count": 1752
},
"SoccerTwos.Step.mean": {
"value": 17529908.0,
"min": 9828.0,
"max": 17529908.0,
"count": 1753
},
"SoccerTwos.Step.sum": {
"value": 17529908.0,
"min": 9828.0,
"max": 17529908.0,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0222158320248127,
"min": -0.1273474246263504,
"max": 0.16890715062618256,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.799194812774658,
"min": -19.033733367919922,
"max": 29.889747619628906,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02260405384004116,
"min": -0.12866853177547455,
"max": 0.16223177313804626,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.8481106758117676,
"min": -19.8258056640625,
"max": 30.364227294921875,
"count": 1753
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1753
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.024958727851746573,
"min": -0.6928956508636475,
"max": 0.5539918329034533,
"count": 1753
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -3.1447997093200684,
"min": -63.82279986143112,
"max": 67.08880001306534,
"count": 1753
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.024958727851746573,
"min": -0.6928956508636475,
"max": 0.5539918329034533,
"count": 1753
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -3.1447997093200684,
"min": -63.82279986143112,
"max": 67.08880001306534,
"count": 1753
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1753
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1753
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01969123772966365,
"min": 0.011068875553125206,
"max": 0.028425948153017088,
"count": 849
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01969123772966365,
"min": 0.011068875553125206,
"max": 0.028425948153017088,
"count": 849
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09342529103159905,
"min": 0.001761433262921249,
"max": 0.12119288568695387,
"count": 849
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09342529103159905,
"min": 0.001761433262921249,
"max": 0.12119288568695387,
"count": 849
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09476606398820878,
"min": 0.001802619556353117,
"max": 0.12373307670156161,
"count": 849
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09476606398820878,
"min": 0.001802619556353117,
"max": 0.12373307670156161,
"count": 849
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 849
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 849
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 849
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 849
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 849
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 849
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685785241",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/npg0/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=default --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685928294"
},
"total": 143052.9272272084,
"count": 1,
"self": 0.14630116522312164,
"children": {
"run_training.setup": {
"total": 0.008112570270895958,
"count": 1,
"self": 0.008112570270895958
},
"TrainerController.start_learning": {
"total": 143052.7728134729,
"count": 1,
"self": 28.223541285842657,
"children": {
"TrainerController._reset_env": {
"total": 14.26833320222795,
"count": 88,
"self": 14.26833320222795
},
"TrainerController.advance": {
"total": 143010.05111518688,
"count": 1204662,
"self": 27.31034628674388,
"children": {
"env_step": {
"total": 137200.91371796094,
"count": 1204662,
"self": 133309.15305005945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3875.4531029574573,
"count": 1204662,
"self": 140.04572109878063,
"children": {
"TorchPolicy.evaluate": {
"total": 3735.4073818586767,
"count": 2204072,
"self": 3735.4073818586767
}
}
},
"workers": {
"total": 16.307564944028854,
"count": 1204661,
"self": 0.0,
"children": {
"worker_root": {
"total": 143001.97222033888,
"count": 1204661,
"is_parallel": true,
"self": 12688.692400794476,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002761946991086006,
"count": 2,
"is_parallel": true,
"self": 0.0005067139863967896,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022552330046892166,
"count": 8,
"is_parallel": true,
"self": 0.0022552330046892166
}
}
},
"UnityEnvironment.step": {
"total": 0.13683645986020565,
"count": 1,
"is_parallel": true,
"self": 0.00025064684450626373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.004089776426553726,
"count": 1,
"is_parallel": true,
"self": 0.004089776426553726
},
"communicator.exchange": {
"total": 0.12985243648290634,
"count": 1,
"is_parallel": true,
"self": 0.12985243648290634
},
"steps_from_proto": {
"total": 0.002643600106239319,
"count": 2,
"is_parallel": true,
"self": 0.0003811102360486984,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022624898701906204,
"count": 8,
"is_parallel": true,
"self": 0.0022624898701906204
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 130313.0622113347,
"count": 1204660,
"is_parallel": true,
"self": 253.78334576450288,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1559.9662080537528,
"count": 1204660,
"is_parallel": true,
"self": 1559.9662080537528
},
"communicator.exchange": {
"total": 125633.85390152968,
"count": 1204660,
"is_parallel": true,
"self": 125633.85390152968
},
"steps_from_proto": {
"total": 2865.458755986765,
"count": 2409320,
"is_parallel": true,
"self": 410.41454009898007,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2455.044215887785,
"count": 9637280,
"is_parallel": true,
"self": 2455.044215887785
}
}
}
}
},
"steps_from_proto": {
"total": 0.21760820969939232,
"count": 174,
"is_parallel": true,
"self": 0.031052814796566963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.18655539490282536,
"count": 696,
"is_parallel": true,
"self": 0.18655539490282536
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5781.827050939202,
"count": 1204661,
"self": 204.53986318409443,
"children": {
"process_trajectory": {
"total": 2175.4344605356455,
"count": 1204661,
"self": 2167.158844701946,
"children": {
"RLTrainer._checkpoint": {
"total": 8.275615833699703,
"count": 35,
"self": 8.275615833699703
}
}
},
"_update_policy": {
"total": 3401.8527272194624,
"count": 849,
"self": 2113.559172479436,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1288.2935547400266,
"count": 25479,
"self": 1288.2935547400266
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.028000593185425e-07,
"count": 1,
"self": 8.028000593185425e-07
},
"TrainerController._save_models": {
"total": 0.22982299514114857,
"count": 1,
"self": 0.0012078844010829926,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22861511074006557,
"count": 1,
"self": 0.22861511074006557
}
}
}
}
}
}
}