{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.732181429862976,
"min": 1.6721676588058472,
"max": 1.7746363878250122,
"count": 201
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33036.1640625,
"min": 1314.3040771484375,
"max": 39203.53125,
"count": 201
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 59.154761904761905,
"min": 22.0,
"max": 85.08620689655173,
"count": 201
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19876.0,
"min": 88.0,
"max": 20684.0,
"count": 201
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1184.1481187834331,
"min": 1179.3808442439704,
"max": 1245.1211576021508,
"count": 201
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 198936.88395561677,
"min": 2408.3848347360704,
"max": 275078.6501338947,
"count": 201
},
"SoccerTwos.Step.mean": {
"value": 9999880.0,
"min": 7999987.0,
"max": 9999880.0,
"count": 201
},
"SoccerTwos.Step.sum": {
"value": 9999880.0,
"min": 7999987.0,
"max": 9999880.0,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.047952137887477875,
"min": -0.3794843554496765,
"max": 0.06930270045995712,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.008007049560547,
"min": -17.206485748291016,
"max": 9.979588508605957,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04520222172141075,
"min": -0.3782753646373749,
"max": 0.0739375427365303,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.548770904541016,
"min": -18.65555191040039,
"max": 10.647006034851074,
"count": 201
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 201
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03427784492869577,
"min": -1.0,
"max": 0.27126915023681963,
"count": 201
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.724400103092194,
"min": -60.165599942207336,
"max": 50.998600244522095,
"count": 201
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03427784492869577,
"min": -1.0,
"max": 0.27126915023681963,
"count": 201
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.724400103092194,
"min": -60.165599942207336,
"max": 50.998600244522095,
"count": 201
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 201
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 201
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015484807884786279,
"min": 0.010712086466567902,
"max": 0.02184484260311971,
"count": 97
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015484807884786279,
"min": 0.010712086466567902,
"max": 0.02184484260311971,
"count": 97
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10931220576167107,
"min": 0.09008215417464574,
"max": 0.11983859514196714,
"count": 97
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10931220576167107,
"min": 0.09008215417464574,
"max": 0.11983859514196714,
"count": 97
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11182896370689074,
"min": 0.0918625886241595,
"max": 0.12305790483951569,
"count": 97
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11182896370689074,
"min": 0.0918625886241595,
"max": 0.12305790483951569,
"count": 97
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 97
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 97
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 97
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 97
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 97
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 97
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692072363",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\arind\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1692080634"
},
"total": 8271.546342399999,
"count": 1,
"self": 0.34422339999946416,
"children": {
"run_training.setup": {
"total": 0.13060319999999992,
"count": 1,
"self": 0.13060319999999992
},
"TrainerController.start_learning": {
"total": 8271.0715158,
"count": 1,
"self": 3.870866999952341,
"children": {
"TrainerController._reset_env": {
"total": 4.523448899999807,
"count": 11,
"self": 4.523448899999807
},
"TrainerController.advance": {
"total": 8262.54827700005,
"count": 139226,
"self": 3.62916780008527,
"children": {
"env_step": {
"total": 2431.2926128999343,
"count": 139226,
"self": 1897.7927180996112,
"children": {
"SubprocessEnvManager._take_step": {
"total": 531.2760609002432,
"count": 139226,
"self": 20.32213670011913,
"children": {
"TorchPolicy.evaluate": {
"total": 510.9539242001241,
"count": 250890,
"self": 510.9539242001241
}
}
},
"workers": {
"total": 2.22383390007973,
"count": 139226,
"self": 0.0,
"children": {
"worker_root": {
"total": 8260.908639200072,
"count": 139226,
"is_parallel": true,
"self": 6760.666442999948,
"children": {
"steps_from_proto": {
"total": 0.02259929999895327,
"count": 22,
"is_parallel": true,
"self": 0.004516000000016618,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.018083299998936653,
"count": 88,
"is_parallel": true,
"self": 0.018083299998936653
}
}
},
"UnityEnvironment.step": {
"total": 1500.2195969001245,
"count": 139226,
"is_parallel": true,
"self": 86.47876889976465,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.80817220008848,
"count": 139226,
"is_parallel": true,
"self": 75.80817220008848
},
"communicator.exchange": {
"total": 1074.4842978001786,
"count": 139226,
"is_parallel": true,
"self": 1074.4842978001786
},
"steps_from_proto": {
"total": 263.4483580000929,
"count": 278452,
"is_parallel": true,
"self": 54.522942300503445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 208.92541569958948,
"count": 1113808,
"is_parallel": true,
"self": 208.92541569958948
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5827.626496300031,
"count": 139226,
"self": 29.34687570002734,
"children": {
"process_trajectory": {
"total": 849.6915618000047,
"count": 139226,
"self": 848.9329515000044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.758610300000381,
"count": 5,
"self": 0.758610300000381
}
}
},
"_update_policy": {
"total": 4948.588058799999,
"count": 97,
"self": 334.6716963999934,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4613.916362400006,
"count": 2910,
"self": 4613.916362400006
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999988156370819e-07,
"count": 1,
"self": 7.999988156370819e-07
},
"TrainerController._save_models": {
"total": 0.12892209999881743,
"count": 1,
"self": 0.003294999998615822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1256271000002016,
"count": 1,
"self": 0.1256271000002016
}
}
}
}
}
}
}