poca-SoccerTwos / run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.7906553745269775,
"min": 2.7906553745269775,
"max": 3.152825355529785,
"count": 101
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 52776.875,
"min": 24525.296875,
"max": 97737.3125,
"count": 101
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 67.34246575342466,
"min": 67.34246575342466,
"max": 999.0,
"count": 101
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19664.0,
"min": 1996.0,
"max": 25168.0,
"count": 101
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1368.9925788732346,
"min": 1197.771800761118,
"max": 1374.9423377357964,
"count": 100
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 199872.91651549225,
"min": 2399.287847992216,
"max": 199872.91651549225,
"count": 100
},
"SoccerTwos.Step.mean": {
"value": 2499997.0,
"min": 1499670.0,
"max": 2499997.0,
"count": 101
},
"SoccerTwos.Step.sum": {
"value": 2499997.0,
"min": 1499670.0,
"max": 2499997.0,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.07111520320177078,
"min": -0.011479731649160385,
"max": 0.18219316005706787,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 10.382819175720215,
"min": -0.1940249353647232,
"max": 19.814180374145508,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0729760229587555,
"min": -0.011654743924736977,
"max": 0.19274993240833282,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 10.654499053955078,
"min": -0.19143153727054596,
"max": 19.98401641845703,
"count": 101
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 101
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.08395068286216423,
"min": -0.46153846153846156,
"max": 0.8119999965031942,
"count": 101
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 12.256799697875977,
"min": -19.759999990463257,
"max": 56.10360026359558,
"count": 101
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.08395068286216423,
"min": -0.46153846153846156,
"max": 0.8119999965031942,
"count": 101
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 12.256799697875977,
"min": -19.759999990463257,
"max": 56.10360026359558,
"count": 101
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 101
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 101
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015483492726343685,
"min": 0.012218045504050678,
"max": 0.020981896084655696,
"count": 47
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015483492726343685,
"min": 0.012218045504050678,
"max": 0.020981896084655696,
"count": 47
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06869931096831958,
"min": 0.0008851464566153785,
"max": 0.07153118501106898,
"count": 47
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06869931096831958,
"min": 0.0008851464566153785,
"max": 0.07153118501106898,
"count": 47
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07002369984984398,
"min": 0.0008841153399165099,
"max": 0.07281648889183998,
"count": 47
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07002369984984398,
"min": 0.0008841153399165099,
"max": 0.07281648889183998,
"count": 47
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 47
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 47
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 47
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 47
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 47
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 47
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682565264",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/aditya/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682567120"
},
"total": 1855.288408743,
"count": 1,
"self": 0.14905989399971986,
"children": {
"run_training.setup": {
"total": 0.01727315800002316,
"count": 1,
"self": 0.01727315800002316
},
"TrainerController.start_learning": {
"total": 1855.1220756910002,
"count": 1,
"self": 1.1945324059847735,
"children": {
"TrainerController._reset_env": {
"total": 3.7868088770001975,
"count": 7,
"self": 3.7868088770001975
},
"TrainerController.advance": {
"total": 1849.904136448015,
"count": 65883,
"self": 1.147340069051097,
"children": {
"env_step": {
"total": 1493.1885693970078,
"count": 65883,
"self": 1253.6573550250293,
"children": {
"SubprocessEnvManager._take_step": {
"total": 238.74028168898235,
"count": 65883,
"self": 9.542910278967582,
"children": {
"TorchPolicy.evaluate": {
"total": 229.19737141001477,
"count": 128284,
"self": 229.19737141001477
}
}
},
"workers": {
"total": 0.7909326829961572,
"count": 65882,
"self": 0.0,
"children": {
"worker_root": {
"total": 1852.103163405006,
"count": 65882,
"is_parallel": true,
"self": 767.2810382349901,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004625854999972034,
"count": 2,
"is_parallel": true,
"self": 0.0011449340000240227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0034809209999480117,
"count": 8,
"is_parallel": true,
"self": 0.0034809209999480117
}
}
},
"UnityEnvironment.step": {
"total": 0.032590048999992405,
"count": 1,
"is_parallel": true,
"self": 0.001097213999997848,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0011243229999990945,
"count": 1,
"is_parallel": true,
"self": 0.0011243229999990945
},
"communicator.exchange": {
"total": 0.02731038499999272,
"count": 1,
"is_parallel": true,
"self": 0.02731038499999272
},
"steps_from_proto": {
"total": 0.0030581270000027416,
"count": 2,
"is_parallel": true,
"self": 0.0005138349999924685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002544292000010273,
"count": 8,
"is_parallel": true,
"self": 0.002544292000010273
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.017299582000248392,
"count": 12,
"is_parallel": true,
"self": 0.0027732200001935325,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01452636200005486,
"count": 48,
"is_parallel": true,
"self": 0.01452636200005486
}
}
},
"UnityEnvironment.step": {
"total": 1084.8048255880155,
"count": 65881,
"is_parallel": true,
"self": 70.96715294294768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 50.272730726058654,
"count": 65881,
"is_parallel": true,
"self": 50.272730726058654
},
"communicator.exchange": {
"total": 769.0471808799995,
"count": 65881,
"is_parallel": true,
"self": 769.0471808799995
},
"steps_from_proto": {
"total": 194.51776103900983,
"count": 131762,
"is_parallel": true,
"self": 31.265090789089328,
"children": {
"_process_rank_one_or_two_observation": {
"total": 163.2526702499205,
"count": 527048,
"is_parallel": true,
"self": 163.2526702499205
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 355.5682269819562,
"count": 65882,
"self": 9.973383588958313,
"children": {
"process_trajectory": {
"total": 86.49029842099756,
"count": 65882,
"self": 85.90375237299739,
"children": {
"RLTrainer._checkpoint": {
"total": 0.586546048000173,
"count": 3,
"self": 0.586546048000173
}
}
},
"_update_policy": {
"total": 259.1045449720003,
"count": 47,
"self": 132.59572539199553,
"children": {
"TorchPOCAOptimizer.update": {
"total": 126.5088195800048,
"count": 1410,
"self": 126.5088195800048
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.51000391371781e-07,
"count": 1,
"self": 6.51000391371781e-07
},
"TrainerController._save_models": {
"total": 0.23659730899998976,
"count": 1,
"self": 0.0022121010001683317,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23438520799982143,
"count": 1,
"self": 0.23438520799982143
}
}
}
}
}
}
}
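For reference, each entry under "gauges" above records the latest value of a training statistic together with its running min/max and an update count, while the timer tree nests child timers under "children", with "total" holding cumulative seconds, "count" the number of calls, and "self" the time spent in the block itself excluding its children. Below is a minimal sketch of how one might inspect this file; the run_logs/timers.json path and the print_timer helper are illustrative assumptions, not part of ML-Agents.

import json

# Assumption: the file shown above is read from run_logs/timers.json
# relative to the repository root.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: latest value plus running min/max and the number of updates.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Timer tree: "total" is cumulative seconds for the block, "count" is how many
# times it ran, and "self" excludes time attributed to child timers.
def print_timer(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer(child_name, child, depth + 1)

print_timer("root", timers)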