{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 0.7527447938919067,
"min": 0.7313677072525024,
"max": 1.1261794567108154,
"count": 2798
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 16235.19921875,
"min": 9298.9775390625,
"max": 49444.2265625,
"count": 2798
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 94.56363636363636,
"min": 32.68027210884354,
"max": 111.71111111111111,
"count": 2798
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20804.0,
"min": 15640.0,
"max": 22116.0,
"count": 2798
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1400.663939849299,
"min": 1343.234013535249,
"max": 1449.6001361353983,
"count": 2798
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 154073.03338342288,
"min": 126805.143041464,
"max": 411132.5810677633,
"count": 2798
},
"SoccerTwos.Step.mean": {
"value": 99999958.0,
"min": 72029953.0,
"max": 99999958.0,
"count": 2798
},
"SoccerTwos.Step.sum": {
"value": 99999958.0,
"min": 72029953.0,
"max": 99999958.0,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.004223335534334183,
"min": -0.11730516701936722,
"max": 0.049747075885534286,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.45612025260925293,
"min": -27.16960906982422,
"max": 11.099370956420898,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.004822712391614914,
"min": -0.11837155371904373,
"max": 0.04958106949925423,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.5208529233932495,
"min": -27.064889907836914,
"max": 11.35406494140625,
"count": 2798
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2798
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.2900277762501328,
"min": -0.42298035642930437,
"max": 0.3460653859835405,
"count": 2798
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 31.322999835014343,
"min": -63.39960038661957,
"max": 43.89800024032593,
"count": 2798
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.2900277762501328,
"min": -0.42298035642930437,
"max": 0.3460653859835405,
"count": 2798
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 31.322999835014343,
"min": -63.39960038661957,
"max": 43.89800024032593,
"count": 2798
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2798
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2798
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013111631310312077,
"min": 0.007057794004989167,
"max": 0.017851967857374498,
"count": 680
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013111631310312077,
"min": 0.007057794004989167,
"max": 0.017851967857374498,
"count": 680
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09178730522592862,
"min": 0.07855677058299383,
"max": 0.10971007843812307,
"count": 680
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09178730522592862,
"min": 0.07855677058299383,
"max": 0.10971007843812307,
"count": 680
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09236818452676138,
"min": 0.07896297425031662,
"max": 0.11027897596359253,
"count": 680
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09236818452676138,
"min": 0.07896297425031662,
"max": 0.11027897596359253,
"count": 680
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 4.4967996261031204e-08,
"min": 4.4967996261031204e-08,
"max": 0.00033524322006307094,
"count": 680
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 4.4967996261031204e-08,
"min": 4.4967996261031204e-08,
"max": 0.00033524322006307094,
"count": 680
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10000373899999998,
"min": 0.10000373899999998,
"max": 0.12793692900000003,
"count": 680
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10000373899999998,
"min": 0.10000373899999998,
"max": 0.12793692900000003,
"count": 680
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.0186576100000135e-05,
"min": 1.0186576100000135e-05,
"max": 0.0014040527570999995,
"count": 680
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.0186576100000135e-05,
"min": 1.0186576100000135e-05,
"max": 0.0014040527570999995,
"count": 680
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676759098",
"python_version": "3.9.0 (default, Nov 15 2020, 14:28:56) \n[GCC 7.3.0]",
"command_line_arguments": "/home/olav/dev/anaconda3/envs/rl/bin/mlagents-learn 2x.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=2x --no-graphics --results-dir=./results --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676775423"
},
"total": 16325.73590417899,
"count": 1,
"self": 0.2780328399967402,
"children": {
"run_training.setup": {
"total": 0.03004099801182747,
"count": 1,
"self": 0.03004099801182747
},
"TrainerController.start_learning": {
"total": 16325.427830340981,
"count": 1,
"self": 7.091412148496602,
"children": {
"TrainerController._reset_env": {
"total": 19.600715613400098,
"count": 141,
"self": 19.600715613400098
},
"TrainerController.advance": {
"total": 16298.605216769094,
"count": 220262,
"self": 4.137662488908973,
"children": {
"env_step": {
"total": 8880.398493857821,
"count": 220262,
"self": 3167.0081850614515,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5704.244866189139,
"count": 2014183,
"self": 148.02886377077084,
"children": {
"TorchPolicy.evaluate": {
"total": 5556.216002418369,
"count": 3718026,
"self": 5556.216002418369
}
}
},
"workers": {
"total": 9.145442607230507,
"count": 220262,
"self": 0.0,
"children": {
"worker_root": {
"total": 195771.2345217719,
"count": 2013804,
"is_parallel": true,
"self": 181232.04467304505,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.012207866937387735,
"count": 14,
"is_parallel": true,
"self": 0.003013705078046769,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.009194161859340966,
"count": 56,
"is_parallel": true,
"self": 0.009194161859340966
}
}
},
"UnityEnvironment.step": {
"total": 0.10618874401552603,
"count": 7,
"is_parallel": true,
"self": 0.002918574959039688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0024377889349125326,
"count": 7,
"is_parallel": true,
"self": 0.0024377889349125326
},
"communicator.exchange": {
"total": 0.09188617300242186,
"count": 7,
"is_parallel": true,
"self": 0.09188617300242186
},
"steps_from_proto": {
"total": 0.00894620711915195,
"count": 14,
"is_parallel": true,
"self": 0.0018873143708333373,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007058892748318613,
"count": 56,
"is_parallel": true,
"self": 0.007058892748318613
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 3.0666982491966337,
"count": 3360,
"is_parallel": true,
"self": 0.5372369070537388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.529461342142895,
"count": 13440,
"is_parallel": true,
"self": 2.529461342142895
}
}
},
"UnityEnvironment.step": {
"total": 14536.123150477652,
"count": 2013797,
"is_parallel": true,
"self": 914.1332000488765,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 607.0277173338691,
"count": 2013797,
"is_parallel": true,
"self": 607.0277173338691
},
"communicator.exchange": {
"total": 10221.373080469086,
"count": 2013797,
"is_parallel": true,
"self": 10221.373080469086
},
"steps_from_proto": {
"total": 2793.58915262582,
"count": 4027594,
"is_parallel": true,
"self": 538.6196256728144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2254.9695269530057,
"count": 16110376,
"is_parallel": true,
"self": 2254.9695269530057
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7414.069060422364,
"count": 220262,
"self": 162.93663687229855,
"children": {
"process_trajectory": {
"total": 2979.520096707449,
"count": 220262,
"self": 2972.5634005694883,
"children": {
"RLTrainer._checkpoint": {
"total": 6.9566961379605345,
"count": 56,
"self": 6.9566961379605345
}
}
},
"_update_policy": {
"total": 4271.612326842616,
"count": 680,
"self": 3076.523804622295,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1195.0885222203215,
"count": 20400,
"self": 1195.0885222203215
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.600174285471439e-07,
"count": 1,
"self": 7.600174285471439e-07
},
"TrainerController._save_models": {
"total": 0.13048504997277632,
"count": 1,
"self": 0.0009444899624213576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12954056001035497,
"count": 1,
"self": 0.12954056001035497
}
}
}
}
}
}
}