poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2699766159057617,
"min": 2.232830762863159,
"max": 3.2957026958465576,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41767.5703125,
"min": 28372.73828125,
"max": 110894.3203125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 403.8,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 13852.0,
"max": 27860.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.061149742461,
"min": 1195.1258727213449,
"max": 1207.552417669949,
"count": 108
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2408.122299484922,
"min": 2391.3603685124503,
"max": 14414.783627873705,
"count": 108
},
"SoccerTwos.Step.mean": {
"value": 4999660.0,
"min": 9282.0,
"max": 4999660.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999660.0,
"min": 9282.0,
"max": 4999660.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 2.9040646040812135e-05,
"min": -0.03394120931625366,
"max": 0.03186890110373497,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.00029040646040812135,
"min": -0.40145978331565857,
"max": 0.4142957031726837,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4253662559203804e-05,
"min": -0.03666608780622482,
"max": 0.02829485572874546,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.00024253662559203804,
"min": -0.4280465245246887,
"max": 0.36700335144996643,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4414666652679443,
"max": 0.19018461612554696,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -6.621999979019165,
"max": 2.4724000096321106,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4414666652679443,
"max": 0.19018461612554696,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -6.621999979019165,
"max": 2.4724000096321106,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02023627607074256,
"min": 0.009919229924465374,
"max": 0.022984933798822264,
"count": 228
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02023627607074256,
"min": 0.009919229924465374,
"max": 0.022984933798822264,
"count": 228
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 4.686683456138023e-09,
"min": 1.606776804200886e-09,
"max": 0.006113959864402811,
"count": 228
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 4.686683456138023e-09,
"min": 1.606776804200886e-09,
"max": 0.006113959864402811,
"count": 228
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 5.256828912910313e-09,
"min": 2.3889310899842067e-09,
"max": 0.0061291882457832495,
"count": 228
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 5.256828912910313e-09,
"min": 2.3889310899842067e-09,
"max": 0.0061291882457832495,
"count": 228
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 228
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 228
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 228
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 228
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 228
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 228
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720975136",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/traphole/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos-02 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720981081"
},
"total": 5945.021420411,
"count": 1,
"self": 0.16699708000032842,
"children": {
"run_training.setup": {
"total": 0.006612598999936381,
"count": 1,
"self": 0.006612598999936381
},
"TrainerController.start_learning": {
"total": 5944.847810732,
"count": 1,
"self": 3.611256097077785,
"children": {
"TrainerController._reset_env": {
"total": 2.2492282200000773,
"count": 25,
"self": 2.2492282200000773
},
"TrainerController.advance": {
"total": 5938.898799472922,
"count": 325451,
"self": 3.369235935994766,
"children": {
"env_step": {
"total": 4439.669070409934,
"count": 325451,
"self": 3065.3949625970645,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1371.6715188837486,
"count": 325451,
"self": 23.89687841690602,
"children": {
"TorchPolicy.evaluate": {
"total": 1347.7746404668426,
"count": 647080,
"self": 1347.7746404668426
}
}
},
"workers": {
"total": 2.6025889291206568,
"count": 325451,
"self": 0.0,
"children": {
"worker_root": {
"total": 5937.670574957921,
"count": 325451,
"is_parallel": true,
"self": 3288.615535080862,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019549280000319413,
"count": 2,
"is_parallel": true,
"self": 0.0009611390000827669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009937889999491745,
"count": 8,
"is_parallel": true,
"self": 0.0009937889999491745
}
}
},
"UnityEnvironment.step": {
"total": 0.015712352999798895,
"count": 1,
"is_parallel": true,
"self": 0.0002828979995683767,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020708700003524427,
"count": 1,
"is_parallel": true,
"self": 0.00020708700003524427
},
"communicator.exchange": {
"total": 0.014340580999942176,
"count": 1,
"is_parallel": true,
"self": 0.014340580999942176
},
"steps_from_proto": {
"total": 0.0008817870002530981,
"count": 2,
"is_parallel": true,
"self": 0.00021548600034293486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006663009999101632,
"count": 8,
"is_parallel": true,
"self": 0.0006663009999101632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2649.0360585360595,
"count": 325450,
"is_parallel": true,
"self": 82.37017596480291,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 54.07778425397055,
"count": 325450,
"is_parallel": true,
"self": 54.07778425397055
},
"communicator.exchange": {
"total": 2279.225571795141,
"count": 325450,
"is_parallel": true,
"self": 2279.225571795141
},
"steps_from_proto": {
"total": 233.36252652214466,
"count": 650900,
"is_parallel": true,
"self": 47.633744677654704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 185.72878184448996,
"count": 2603600,
"is_parallel": true,
"self": 185.72878184448996
}
}
}
}
},
"steps_from_proto": {
"total": 0.01898134099951676,
"count": 48,
"is_parallel": true,
"self": 0.003881796998257414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.015099544001259346,
"count": 192,
"is_parallel": true,
"self": 0.015099544001259346
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1495.860493126994,
"count": 325451,
"self": 36.14877145807577,
"children": {
"process_trajectory": {
"total": 320.3251588089181,
"count": 325451,
"self": 319.3898621959188,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9352966129993092,
"count": 10,
"self": 0.9352966129993092
}
}
},
"_update_policy": {
"total": 1139.3865628600001,
"count": 228,
"self": 267.03702401602345,
"children": {
"TorchPOCAOptimizer.update": {
"total": 872.3495388439767,
"count": 6849,
"self": 872.3495388439767
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.90999809699133e-07,
"count": 1,
"self": 4.90999809699133e-07
},
"TrainerController._save_models": {
"total": 0.08852645099977963,
"count": 1,
"self": 0.0009331150004072697,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08759333599937236,
"count": 1,
"self": 0.08759333599937236
}
}
}
}
}
}
}
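For reference, a minimal Python sketch (not part of the original run log) of how the gauges and timer tree above could be loaded and summarized with the standard library; the relative path "run_logs/timers.json" is an assumption about where this file sits in the repository.

import json

# Assumed location of this file within the repo; adjust as needed.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print each gauge with its final value and the observed range.
for name, g in timers["gauges"].items():
    print(f"{name}: value={g['value']:.6g} "
          f"(min={g['min']:.6g}, max={g['max']:.6g}, count={g['count']})")

# Total wall-clock time of the run and the share spent stepping the environment.
total = timers["total"]
env_step = timers["children"]["TrainerController.start_learning"]["children"][
    "TrainerController.advance"]["children"]["env_step"]["total"]
print(f"total: {total:.1f}s, env_step: {env_step:.1f}s ({100 * env_step / total:.1f}%)")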