{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8118613958358765,
"min": 1.6636101007461548,
"max": 1.843156099319458,
"count": 138
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34613.80078125,
"min": 31359.28515625,
"max": 39437.20703125,
"count": 138
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.15217391304348,
"min": 44.61818181818182,
"max": 74.35820895522389,
"count": 138
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19928.0,
"min": 16656.0,
"max": 20384.0,
"count": 138
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1702.4857863511247,
"min": 1689.0574729382788,
"max": 1741.4789908541304,
"count": 138
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313257.38468860695,
"min": 230337.3248480296,
"max": 374924.64448671497,
"count": 138
},
"SoccerTwos.Step.mean": {
"value": 10009980.0,
"min": 8639960.0,
"max": 10009980.0,
"count": 138
},
"SoccerTwos.Step.sum": {
"value": 10009980.0,
"min": 8639960.0,
"max": 10009980.0,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.010594820603728294,
"min": -0.09363055974245071,
"max": 0.06854648143053055,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.9388521909713745,
"min": -18.35158920288086,
"max": 13.549934387207031,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.012300742790102959,
"min": -0.09186331927776337,
"max": 0.07186656445264816,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.2510359287261963,
"min": -18.005210876464844,
"max": 15.093283653259277,
"count": 138
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 138
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.009724591599136103,
"min": -0.30681618445181436,
"max": 0.25714331578443395,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.7796002626419067,
"min": -53.515599727630615,
"max": 50.4333997964859,
"count": 138
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.009724591599136103,
"min": -0.30681618445181436,
"max": 0.25714331578443395,
"count": 138
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.7796002626419067,
"min": -53.515599727630615,
"max": 50.4333997964859,
"count": 138
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 138
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 138
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019012902875935347,
"min": 0.010337586479363382,
"max": 0.02299332935363054,
"count": 66
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019012902875935347,
"min": 0.010337586479363382,
"max": 0.02299332935363054,
"count": 66
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10139179453253747,
"min": 0.09779081518451373,
"max": 0.12342411826054255,
"count": 66
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10139179453253747,
"min": 0.09779081518451373,
"max": 0.12342411826054255,
"count": 66
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10178855781753858,
"min": 0.09808570270737012,
"max": 0.12474122320612271,
"count": 66
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10178855781753858,
"min": 0.09808570270737012,
"max": 0.12474122320612271,
"count": 66
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00015,
"min": 0.00015,
"max": 0.00015,
"count": 66
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00015,
"min": 0.00015,
"max": 0.00015,
"count": 66
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 66
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 66
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 66
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678726861",
"python_version": "3.9.5 (tags/v3.9.5:0a7dcbd, May 3 2021, 17:27:52) [MSC v.1928 64 bit (AMD64)]",
"command_line_arguments": "D:\\Documents\\Tesis\\thesis_env\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1678728868"
},
"total": 2007.3160672,
"count": 1,
"self": 0.056033299999853625,
"children": {
"run_training.setup": {
"total": 0.08213850000000011,
"count": 1,
"self": 0.08213850000000011
},
"TrainerController.start_learning": {
"total": 2007.1778954000001,
"count": 1,
"self": 1.637189700020599,
"children": {
"TrainerController._reset_env": {
"total": 4.090669199999826,
"count": 8,
"self": 4.090669199999826
},
"TrainerController.advance": {
"total": 2001.3126216999794,
"count": 96197,
"self": 1.4328627999125274,
"children": {
"env_step": {
"total": 1407.015403500059,
"count": 96197,
"self": 788.8511395001028,
"children": {
"SubprocessEnvManager._take_step": {
"total": 617.252432499953,
"count": 96197,
"self": 8.982159299974114,
"children": {
"TorchPolicy.evaluate": {
"total": 608.2702731999789,
"count": 172776,
"self": 608.2702731999789
}
}
},
"workers": {
"total": 0.9118315000031885,
"count": 96197,
"self": 0.0,
"children": {
"worker_root": {
"total": 1998.0237361000184,
"count": 96197,
"is_parallel": true,
"self": 1365.5229779000274,
"children": {
"steps_from_proto": {
"total": 0.010172899999911778,
"count": 16,
"is_parallel": true,
"self": 0.00204889999975455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008124000000157228,
"count": 64,
"is_parallel": true,
"self": 0.008124000000157228
}
}
},
"UnityEnvironment.step": {
"total": 632.4905852999909,
"count": 96197,
"is_parallel": true,
"self": 35.8199392999702,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.812671299991184,
"count": 96197,
"is_parallel": true,
"self": 25.812671299991184
},
"communicator.exchange": {
"total": 465.90249760001274,
"count": 96197,
"is_parallel": true,
"self": 465.90249760001274
},
"steps_from_proto": {
"total": 104.95547710001682,
"count": 192394,
"is_parallel": true,
"self": 21.353446499925468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.60203060009135,
"count": 769576,
"is_parallel": true,
"self": 83.60203060009135
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 592.864355400008,
"count": 96197,
"self": 12.526563400013629,
"children": {
"process_trajectory": {
"total": 312.0778004999946,
"count": 96197,
"self": 311.60571909999476,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4720813999998654,
"count": 3,
"self": 0.4720813999998654
}
}
},
"_update_policy": {
"total": 268.2599914999998,
"count": 67,
"self": 141.44559800000098,
"children": {
"TorchPOCAOptimizer.update": {
"total": 126.81439349999879,
"count": 2007,
"self": 126.81439349999879
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.000000212225132e-07,
"count": 1,
"self": 6.000000212225132e-07
},
"TrainerController._save_models": {
"total": 0.1374142000001939,
"count": 1,
"self": 0.0266540000002351,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11076019999995879,
"count": 1,
"self": 0.11076019999995879
}
}
}
}
}
}
}