{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2244436740875244,
"min": 3.198763370513916,
"max": 3.241471290588379,
"count": 66
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 73156.1796875,
"min": 3098.9404296875,
"max": 512219.9375,
"count": 66
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 587.4444444444445,
"min": 144.44444444444446,
"max": 999.0,
"count": 66
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21148.0,
"min": 7984.0,
"max": 41472.0,
"count": 66
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1222.8851096353276,
"min": 1215.8343526980111,
"max": 1225.746497755484,
"count": 45
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7337.310657811966,
"min": 2437.283413100037,
"max": 38924.945203934614,
"count": 45
},
"SoccerTwos.Step.mean": {
"value": 1499672.0,
"min": 829838.0,
"max": 1499672.0,
"count": 68
},
"SoccerTwos.Step.sum": {
"value": 1499672.0,
"min": 829838.0,
"max": 1499672.0,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.016859833151102066,
"min": -0.02104775421321392,
"max": -0.003187461756169796,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.2866171598434448,
"min": -0.5619957447052002,
"max": -0.05565240606665611,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.017617706209421158,
"min": -0.0208752378821373,
"max": -0.002720274729654193,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.2995010018348694,
"min": -0.6418810486793518,
"max": -0.05168522149324417,
"count": 68
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 68
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.21416470583747416,
"min": -0.5732000009580092,
"max": 0.373317655395059,
"count": 68
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -3.6407999992370605,
"min": -12.035600066184998,
"max": 6.346400141716003,
"count": 68
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.21416470583747416,
"min": -0.5732000009580092,
"max": 0.373317655395059,
"count": 68
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -3.6407999992370605,
"min": -12.035600066184998,
"max": 6.346400141716003,
"count": 68
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01624850803151882,
"min": 0.012868414125524653,
"max": 0.021901836921460926,
"count": 30
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01624850803151882,
"min": 0.012868414125524653,
"max": 0.021901836921460926,
"count": 30
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.004522712070805331,
"min": 2.6450971277982817e-06,
"max": 0.020304427420099577,
"count": 30
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.004522712070805331,
"min": 2.6450971277982817e-06,
"max": 0.020304427420099577,
"count": 30
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004603836918249726,
"min": 2.745565104255042e-06,
"max": 0.020521839211384455,
"count": 30
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004603836918249726,
"min": 2.745565104255042e-06,
"max": 0.020521839211384455,
"count": 30
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 30
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 30
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 30
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 30
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 30
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681121497",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env /content/SoccerTwos/SoccerTwos.x86_64 --run-id=resnet --no-graphics --resume --num-envs=16",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681129465"
},
"total": 7968.739322806,
"count": 1,
"self": 0.19042344400077127,
"children": {
"run_training.setup": {
"total": 0.4053061519999801,
"count": 1,
"self": 0.4053061519999801
},
"TrainerController.start_learning": {
"total": 7968.143593209999,
"count": 1,
"self": 8.747177222989194,
"children": {
"TrainerController._reset_env": {
"total": 26.529754764999893,
"count": 5,
"self": 26.529754764999893
},
"TrainerController.advance": {
"total": 7927.853502225012,
"count": 17333,
"self": 8.594402394021927,
"children": {
"env_step": {
"total": 7350.944601472977,
"count": 17333,
"self": 3028.5409793460803,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4317.983979797907,
"count": 65911,
"self": 136.1861150478262,
"children": {
"TorchPolicy.evaluate": {
"total": 4181.79786475008,
"count": 131043,
"self": 4181.79786475008
}
}
},
"workers": {
"total": 4.419642328990335,
"count": 17332,
"self": 0.0,
"children": {
"worker_root": {
"total": 126883.41742792574,
"count": 65850,
"is_parallel": true,
"self": 44471.91520949743,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.07903244199997062,
"count": 4,
"is_parallel": true,
"self": 0.0504545539999981,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.028577887999972518,
"count": 16,
"is_parallel": true,
"self": 0.028577887999972518
}
}
},
"UnityEnvironment.step": {
"total": 3.867568241000072,
"count": 2,
"is_parallel": true,
"self": 0.0008936390000826577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.005877819000033924,
"count": 2,
"is_parallel": true,
"self": 0.005877819000033924
},
"communicator.exchange": {
"total": 3.714437587999896,
"count": 2,
"is_parallel": true,
"self": 3.714437587999896
},
"steps_from_proto": {
"total": 0.14635919500005912,
"count": 4,
"is_parallel": true,
"self": 0.0015648520000013377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.14479434300005778,
"count": 16,
"is_parallel": true,
"self": 0.14479434300005778
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 2.661323855997921,
"count": 128,
"is_parallel": true,
"self": 0.2890387419953413,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.3722851140025796,
"count": 512,
"is_parallel": true,
"self": 2.3722851140025796
}
}
},
"UnityEnvironment.step": {
"total": 82408.84089457232,
"count": 65848,
"is_parallel": true,
"self": 305.22967830090784,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 260.7206562592287,
"count": 65848,
"is_parallel": true,
"self": 260.7206562592287
},
"communicator.exchange": {
"total": 77990.7761114221,
"count": 65848,
"is_parallel": true,
"self": 77990.7761114221
},
"steps_from_proto": {
"total": 3852.114448590069,
"count": 131696,
"is_parallel": true,
"self": 509.0213842210951,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3343.093064368974,
"count": 526784,
"is_parallel": true,
"self": 3343.093064368974
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 568.3144983580123,
"count": 17332,
"self": 57.62242512005787,
"children": {
"process_trajectory": {
"total": 249.20354784495748,
"count": 17332,
"self": 248.19931421295814,
"children": {
"RLTrainer._checkpoint": {
"total": 1.004233631999341,
"count": 2,
"self": 1.004233631999341
}
}
},
"_update_policy": {
"total": 261.48852539299696,
"count": 30,
"self": 171.83107314999052,
"children": {
"TorchPOCAOptimizer.update": {
"total": 89.65745224300645,
"count": 948,
"self": 89.65745224300645
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.538999640615657e-06,
"count": 1,
"self": 1.538999640615657e-06
},
"TrainerController._save_models": {
"total": 5.013157457999114,
"count": 1,
"self": 0.03999088499949721,
"children": {
"RLTrainer._checkpoint": {
"total": 4.973166572999617,
"count": 1,
"self": 4.973166572999617
}
}
}
}
}
}
}