poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.7942075729370117,
"min": 2.754873275756836,
"max": 3.295686721801758,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53112.296875,
"min": 24600.42578125,
"max": 137081.078125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 74.5223880597015,
"min": 60.59493670886076,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19972.0,
"min": 15984.0,
"max": 23700.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1379.42911745563,
"min": 1196.989179088548,
"max": 1388.3395772211052,
"count": 374
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 184843.5017390544,
"min": 2395.2283389784525,
"max": 216594.67121172036,
"count": 374
},
"SoccerTwos.Step.mean": {
"value": 4999794.0,
"min": 9984.0,
"max": 4999794.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999794.0,
"min": 9984.0,
"max": 4999794.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.1350976973772049,
"min": -0.035701099783182144,
"max": 0.18220391869544983,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 18.103092193603516,
"min": -0.4051840305328369,
"max": 20.905242919921875,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.12667037546634674,
"min": -0.035627663135528564,
"max": 0.179193377494812,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 16.973831176757812,
"min": -0.40384000539779663,
"max": 21.582408905029297,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.26259104440461345,
"min": -0.5,
"max": 0.5559741979645144,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 35.1871999502182,
"min": -33.56000030040741,
"max": 38.07399958372116,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.26259104440461345,
"min": -0.5,
"max": 0.5559741979645144,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 35.1871999502182,
"min": -33.56000030040741,
"max": 38.07399958372116,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0172898738499498,
"min": 0.0110649322445776,
"max": 0.02328758956864476,
"count": 233
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0172898738499498,
"min": 0.0110649322445776,
"max": 0.02328758956864476,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07629762962460518,
"min": 2.328598699345245e-07,
"max": 0.07629762962460518,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07629762962460518,
"min": 2.328598699345245e-07,
"max": 0.07629762962460518,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0788829522828261,
"min": 2.305944605041077e-07,
"max": 0.0788829522828261,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0788829522828261,
"min": 2.305944605041077e-07,
"max": 0.0788829522828261,
"count": 233
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711703059",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/dl/miniconda3/envs/huggydog/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711710715"
},
"total": 7656.241693258009,
"count": 1,
"self": 0.22022052299871575,
"children": {
"run_training.setup": {
"total": 0.014876824003295042,
"count": 1,
"self": 0.014876824003295042
},
"TrainerController.start_learning": {
"total": 7656.006595911007,
"count": 1,
"self": 4.524906503793318,
"children": {
"TrainerController._reset_env": {
"total": 4.455137729994021,
"count": 25,
"self": 4.455137729994021
},
"TrainerController.advance": {
"total": 7646.895498553204,
"count": 325707,
"self": 4.719002537851338,
"children": {
"env_step": {
"total": 6348.3163182252465,
"count": 325707,
"self": 5141.592596395945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1204.0513595429074,
"count": 325707,
"self": 32.5252378933219,
"children": {
"TorchPolicy.evaluate": {
"total": 1171.5261216495855,
"count": 643628,
"self": 1171.5261216495855
}
}
},
"workers": {
"total": 2.6723622863937635,
"count": 325707,
"self": 0.0,
"children": {
"worker_root": {
"total": 7642.010254029374,
"count": 325707,
"is_parallel": true,
"self": 3281.5927672590915,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005378963003749959,
"count": 2,
"is_parallel": true,
"self": 0.001213620009366423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004165342994383536,
"count": 8,
"is_parallel": true,
"self": 0.004165342994383536
}
}
},
"UnityEnvironment.step": {
"total": 0.036783821997232735,
"count": 1,
"is_parallel": true,
"self": 0.0013477110042003915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.001046876990585588,
"count": 1,
"is_parallel": true,
"self": 0.001046876990585588
},
"communicator.exchange": {
"total": 0.030378766008652747,
"count": 1,
"is_parallel": true,
"self": 0.030378766008652747
},
"steps_from_proto": {
"total": 0.004010467993794009,
"count": 2,
"is_parallel": true,
"self": 0.0006880099681438878,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033224580256501213,
"count": 8,
"is_parallel": true,
"self": 0.0033224580256501213
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4360.3338411293225,
"count": 325706,
"is_parallel": true,
"self": 227.47954032485723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 145.80886838537117,
"count": 325706,
"is_parallel": true,
"self": 145.80886838537117
},
"communicator.exchange": {
"total": 3299.719791667798,
"count": 325706,
"is_parallel": true,
"self": 3299.719791667798
},
"steps_from_proto": {
"total": 687.3256407512963,
"count": 651412,
"is_parallel": true,
"self": 121.56853551628592,
"children": {
"_process_rank_one_or_two_observation": {
"total": 565.7571052350104,
"count": 2605648,
"is_parallel": true,
"self": 565.7571052350104
}
}
}
}
},
"steps_from_proto": {
"total": 0.08364564095973037,
"count": 48,
"is_parallel": true,
"self": 0.015067722997628152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06857791796210222,
"count": 192,
"is_parallel": true,
"self": 0.06857791796210222
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1293.8601777901058,
"count": 325707,
"self": 64.90926352069073,
"children": {
"process_trajectory": {
"total": 397.23057644638175,
"count": 325707,
"self": 395.7071134113503,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5234630350314546,
"count": 10,
"self": 1.5234630350314546
}
}
},
"_update_policy": {
"total": 831.7203378230333,
"count": 233,
"self": 497.1897824712796,
"children": {
"TorchPOCAOptimizer.update": {
"total": 334.5305553517537,
"count": 6990,
"self": 334.5305553517537
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.690097507089376e-07,
"count": 1,
"self": 6.690097507089376e-07
},
"TrainerController._save_models": {
"total": 0.1310524550062837,
"count": 1,
"self": 0.0011843630054499954,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1298680920008337,
"count": 1,
"self": 0.1298680920008337
}
}
}
}
}
}
}
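
Below is a minimal sketch of how a run_logs/timers.json like the one above can be inspected offline with Python. The local filename and the summary logic are illustrative assumptions, not part of the ML-Agents tooling; only the "gauges" / "children" / "total" / "count" keys used here come from the file itself.

```python
# A minimal sketch (not part of the ML-Agents output): load the timers.json
# shown above and print the recorded gauges plus the slowest timer nodes.
# The local filename "timers.json" is an assumption for illustration.
import json


def walk(node, name="root"):
    """Recursively yield (name, total_seconds, call_count) for every timer node."""
    yield name, node.get("total", 0.0), node.get("count", 0)
    for child_name, child in node.get("children", {}).items():
        yield from walk(child, child_name)


with open("timers.json") as f:
    data = json.load(f)

# Gauges: the last recorded value plus min/max over the run.
for gauge_name, stats in data["gauges"].items():
    print(f"{gauge_name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, count={stats['count']})")

# Timer tree: rank nodes by total wall-clock time spent inside them.
slowest = sorted(walk(data), key=lambda row: row[1], reverse=True)
for name, total, count in slowest[:10]:
    print(f"{name}: {total:.1f}s over {count} call(s)")
```

On this log, such a summary would surface `communicator.exchange` and `TorchPolicy.evaluate` as the dominant costs under `env_step`, which is the usual pattern for a Unity environment trained over a subprocess channel.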