{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": -3.576278402306343e-07,
"min": -3.576278402306343e-07,
"max": 2.557163715362549,
"count": 1132
},
"SoccerTwos.Policy.Entropy.sum": {
"value": -0.00946426298469305,
"min": -0.01506040245294571,
"max": 76979.546875,
"count": 1132
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 292.7857142857143,
"max": 999.0,
"count": 1132
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 3996.0,
"max": 30500.0,
"count": 1132
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1217.9983411658543,
"min": 1195.3378159121753,
"max": 1226.1962097703772,
"count": 930
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7307.990046995126,
"min": 2392.145426596695,
"max": 31557.451750627508,
"count": 930
},
"SoccerTwos.Step.mean": {
"value": 11319452.0,
"min": 9268.0,
"max": 11319452.0,
"count": 1132
},
"SoccerTwos.Step.sum": {
"value": 11319452.0,
"min": 9268.0,
"max": 11319452.0,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.661151111125946,
"min": -18.157546997070312,
"max": 35.32452392578125,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 6.61151123046875,
"min": -363.15093994140625,
"max": 751.2879028320312,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6578042507171631,
"min": -17.91787338256836,
"max": 34.58213424682617,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.578042507171631,
"min": -358.35748291015625,
"max": 725.6465454101562,
"count": 1132
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1132
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.625,
"max": 0.48564211318367406,
"count": 1132
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -12.221199989318848,
"max": 11.304799914360046,
"count": 1132
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.625,
"max": 0.48564211318367406,
"count": 1132
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -12.221199989318848,
"max": 11.304799914360046,
"count": 1132
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1132
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1132
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017271466801563898,
"min": 0.01025862561412699,
"max": 0.9351533432801564,
"count": 524
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017271466801563898,
"min": 0.01025862561412699,
"max": 0.9351533432801564,
"count": 524
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 7.218246236195167,
"min": 0.00014766889119831225,
"max": 32919.68486836751,
"count": 524
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 7.218246236195167,
"min": 0.00014766889119831225,
"max": 32919.68486836751,
"count": 524
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 7.428483950843414,
"min": 0.00042878056410700083,
"max": 29269.570963541668,
"count": 524
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 7.428483950843414,
"min": 0.00042878056410700083,
"max": 29269.570963541668,
"count": 524
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 524
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 524
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 524
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 524
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 524
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 524
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723569977",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\manug\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=C:\\Users\\manug\\ml-agents\\training-envs-executables\\SoccerTwos\\SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1723655904"
},
"total": 85927.1624728,
"count": 1,
"self": 2.2682376000011573,
"children": {
"run_training.setup": {
"total": 0.28242929999760236,
"count": 1,
"self": 0.28242929999760236
},
"TrainerController.start_learning": {
"total": 85924.6118059,
"count": 1,
"self": 26.896228401674307,
"children": {
"TrainerController._reset_env": {
"total": 18.198888300037652,
"count": 57,
"self": 18.198888300037652
},
"TrainerController.advance": {
"total": 85879.32728509829,
"count": 740894,
"self": 26.599330598357483,
"children": {
"env_step": {
"total": 20169.867521103308,
"count": 740894,
"self": 14972.973272791907,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5179.164462507302,
"count": 740894,
"self": 187.56233811143466,
"children": {
"TorchPolicy.evaluate": {
"total": 4991.602124395868,
"count": 1469106,
"self": 4991.602124395868
}
}
},
"workers": {
"total": 17.729785804098356,
"count": 740894,
"self": 0.0,
"children": {
"worker_root": {
"total": 85877.44118350167,
"count": 740894,
"is_parallel": true,
"self": 74464.28552350338,
"children": {
"steps_from_proto": {
"total": 0.14002719999916735,
"count": 114,
"is_parallel": true,
"self": 0.02901829989423277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.11100890010493458,
"count": 456,
"is_parallel": true,
"self": 0.11100890010493458
}
}
},
"UnityEnvironment.step": {
"total": 11413.015632798284,
"count": 740894,
"is_parallel": true,
"self": 684.2067775828509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 665.2635215983828,
"count": 740894,
"is_parallel": true,
"self": 665.2635215983828
},
"communicator.exchange": {
"total": 7872.388067304546,
"count": 740894,
"is_parallel": true,
"self": 7872.388067304546
},
"steps_from_proto": {
"total": 2191.1572663125044,
"count": 1481788,
"is_parallel": true,
"self": 443.9809436022333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1747.176322710271,
"count": 5927152,
"is_parallel": true,
"self": 1747.176322710271
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 65682.86043339662,
"count": 740894,
"self": 206.60896219848655,
"children": {
"process_trajectory": {
"total": 7706.6352393983,
"count": 740894,
"self": 7702.999688698292,
"children": {
"RLTrainer._checkpoint": {
"total": 3.635550700008025,
"count": 22,
"self": 3.635550700008025
}
}
},
"_update_policy": {
"total": 57769.61623179984,
"count": 524,
"self": 2635.168282599647,
"children": {
"TorchPOCAOptimizer.update": {
"total": 55134.44794920019,
"count": 15738,
"self": 55134.44794920019
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7999991541728377e-06,
"count": 1,
"self": 1.7999991541728377e-06
},
"TrainerController._save_models": {
"total": 0.18940229999134317,
"count": 1,
"self": 0.01145429999451153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17794799999683164,
"count": 1,
"self": 0.17794799999683164
}
}
}
}
}
}
}