{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9291170835494995,
"min": 1.8552395105361938,
"max": 3.2957236766815186,
"count": 1180
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39261.390625,
"min": 18268.6484375,
"max": 146873.828125,
"count": 1180
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.11363636363637,
"min": 40.12,
"max": 999.0,
"count": 1180
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20104.0,
"min": 10500.0,
"max": 29024.0,
"count": 1180
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1658.209464932352,
"min": 1189.585084023995,
"max": 1725.188051107122,
"count": 1156
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 291844.86582809396,
"min": 2381.9127099984767,
"max": 407354.7901875677,
"count": 1156
},
"SoccerTwos.Step.mean": {
"value": 11799924.0,
"min": 9448.0,
"max": 11799924.0,
"count": 1180
},
"SoccerTwos.Step.sum": {
"value": 11799924.0,
"min": 9448.0,
"max": 11799924.0,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.049491703510284424,
"min": -0.10328812152147293,
"max": 0.18030281364917755,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.66104793548584,
"min": -14.271673202514648,
"max": 24.303085327148438,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05083049088716507,
"min": -0.10325547307729721,
"max": 0.18770861625671387,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.895336151123047,
"min": -14.213946342468262,
"max": 25.439725875854492,
"count": 1180
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1180
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.14695199932370867,
"min": -0.6272461528961475,
"max": 0.7841285753006838,
"count": 1180
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -25.716599881649017,
"min": -47.014200150966644,
"max": 76.84460037946701,
"count": 1180
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.14695199932370867,
"min": -0.6272461528961475,
"max": 0.7841285753006838,
"count": 1180
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -25.716599881649017,
"min": -47.014200150966644,
"max": 76.84460037946701,
"count": 1180
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1180
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1180
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01856582779728342,
"min": 0.008319745107049433,
"max": 0.020095219637732954,
"count": 285
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01856582779728342,
"min": 0.008319745107049433,
"max": 0.020095219637732954,
"count": 285
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06312520131468773,
"min": 0.0005722402932102947,
"max": 0.07544807940721512,
"count": 285
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06312520131468773,
"min": 0.0005722402932102947,
"max": 0.07544807940721512,
"count": 285
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06485664981106917,
"min": 0.0005708885793865193,
"max": 0.07775571793317795,
"count": 285
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06485664981106917,
"min": 0.0005708885793865193,
"max": 0.07775571793317795,
"count": 285
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0010000000000000002,
"min": 0.001,
"max": 0.0010000000000000002,
"count": 285
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0010000000000000002,
"min": 0.001,
"max": 0.0010000000000000002,
"count": 285
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 285
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 285
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 285
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 285
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686108196",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\eaden\\miniconda3\\envs\\rlHuggingFace\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1686144921"
},
"total": 36725.6045326,
"count": 1,
"self": 0.005228700007137377,
"children": {
"run_training.setup": {
"total": 0.09423650000000006,
"count": 1,
"self": 0.09423650000000006
},
"TrainerController.start_learning": {
"total": 36725.505067399994,
"count": 1,
"self": 15.754929300521326,
"children": {
"TrainerController._reset_env": {
"total": 4.390641099989624,
"count": 59,
"self": 4.390641099989624
},
"TrainerController.advance": {
"total": 36705.24884719948,
"count": 805834,
"self": 15.07107550198998,
"children": {
"env_step": {
"total": 10308.16381749805,
"count": 805834,
"self": 8016.082624194722,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2282.847217702719,
"count": 805834,
"self": 80.76060490266991,
"children": {
"TorchPolicy.evaluate": {
"total": 2202.086612800049,
"count": 1487072,
"self": 2202.086612800049
}
}
},
"workers": {
"total": 9.23397560061084,
"count": 805834,
"self": 0.0,
"children": {
"worker_root": {
"total": 36661.820653898125,
"count": 805834,
"is_parallel": true,
"self": 30250.67168949754,
"children": {
"steps_from_proto": {
"total": 0.08054820000879204,
"count": 118,
"is_parallel": true,
"self": 0.01661829996981501,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06392990003897703,
"count": 472,
"is_parallel": true,
"self": 0.06392990003897703
}
}
},
"UnityEnvironment.step": {
"total": 6411.068416200578,
"count": 805834,
"is_parallel": true,
"self": 325.3614903000116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 232.41281130160502,
"count": 805834,
"is_parallel": true,
"self": 232.41281130160502
},
"communicator.exchange": {
"total": 4817.202108301453,
"count": 805834,
"is_parallel": true,
"self": 4817.202108301453
},
"steps_from_proto": {
"total": 1036.0920062975083,
"count": 1611668,
"is_parallel": true,
"self": 215.1892200998243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 820.902786197684,
"count": 6446672,
"is_parallel": true,
"self": 820.902786197684
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 26382.013954199436,
"count": 805834,
"self": 112.60498300103427,
"children": {
"process_trajectory": {
"total": 2283.8579623984297,
"count": 805834,
"self": 2281.2781560984286,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5798063000011098,
"count": 23,
"self": 2.5798063000011098
}
}
},
"_update_policy": {
"total": 23985.551008799972,
"count": 286,
"self": 1531.1537574001086,
"children": {
"TorchPOCAOptimizer.update": {
"total": 22454.397251399863,
"count": 8565,
"self": 22454.397251399863
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.999995770864189e-07,
"count": 1,
"self": 8.999995770864189e-07
},
"TrainerController._save_models": {
"total": 0.1106489000012516,
"count": 1,
"self": 0.00529159999859985,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10535730000265175,
"count": 1,
"self": 0.10535730000265175
}
}
}
}
}
}
}