{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8641321659088135,
"min": 1.7908551692962646,
"max": 3.2957489490509033,
"count": 509
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38594.9921875,
"min": 14536.015625,
"max": 136551.90625,
"count": 509
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.91025641025641,
"min": 41.398305084745765,
"max": 999.0,
"count": 509
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19940.0,
"min": 13212.0,
"max": 29672.0,
"count": 509
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1632.8899370576994,
"min": 1191.110470024826,
"max": 1655.0225014802024,
"count": 501
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 254730.8301810011,
"min": 2382.220940049652,
"max": 375787.27849597647,
"count": 501
},
"SoccerTwos.Step.mean": {
"value": 5089950.0,
"min": 9854.0,
"max": 5089950.0,
"count": 509
},
"SoccerTwos.Step.sum": {
"value": 5089950.0,
"min": 9854.0,
"max": 5089950.0,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01745498552918434,
"min": -0.08539241552352905,
"max": 0.21286641061306,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.722977638244629,
"min": -16.76850700378418,
"max": 29.801298141479492,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02577521838247776,
"min": -0.09171554446220398,
"max": 0.21494805812835693,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.020934104919434,
"min": -16.529672622680664,
"max": 30.092727661132812,
"count": 509
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 509
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1250102542913877,
"min": -0.6153846153846154,
"max": 0.5182719993591308,
"count": 509
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -19.501599669456482,
"min": -51.33580005168915,
"max": 64.78399991989136,
"count": 509
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1250102542913877,
"min": -0.6153846153846154,
"max": 0.5182719993591308,
"count": 509
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -19.501599669456482,
"min": -51.33580005168915,
"max": 64.78399991989136,
"count": 509
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 509
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 509
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02126805322865645,
"min": 0.011157380873434401,
"max": 0.024535768770147116,
"count": 245
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02126805322865645,
"min": 0.011157380873434401,
"max": 0.024535768770147116,
"count": 245
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11063808078567187,
"min": 0.0008122223856238028,
"max": 0.1258967603246371,
"count": 245
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11063808078567187,
"min": 0.0008122223856238028,
"max": 0.1258967603246371,
"count": 245
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11299070691068967,
"min": 0.0008620897366199643,
"max": 0.1278861070672671,
"count": 245
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11299070691068967,
"min": 0.0008620897366199643,
"max": 0.1278861070672671,
"count": 245
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 245
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 245
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 245
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 245
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 245
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 245
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695558805",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\sanya\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1695573725"
},
"total": 14920.2060066,
"count": 1,
"self": 4.154124499998943,
"children": {
"run_training.setup": {
"total": 0.07894639999999997,
"count": 1,
"self": 0.07894639999999997
},
"TrainerController.start_learning": {
"total": 14915.9729357,
"count": 1,
"self": 9.15670090049207,
"children": {
"TrainerController._reset_env": {
"total": 3.404343799999321,
"count": 26,
"self": 3.404343799999321
},
"TrainerController.advance": {
"total": 14903.250481099509,
"count": 348115,
"self": 9.189602699574607,
"children": {
"env_step": {
"total": 6324.050142599904,
"count": 348115,
"self": 5052.85849279922,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1265.512111200247,
"count": 348115,
"self": 48.2890753008669,
"children": {
"TorchPolicy.evaluate": {
"total": 1217.2230358993802,
"count": 643006,
"self": 1217.2230358993802
}
}
},
"workers": {
"total": 5.679538600437461,
"count": 348114,
"self": 0.0,
"children": {
"worker_root": {
"total": 14900.018855200271,
"count": 348114,
"is_parallel": true,
"self": 10778.057084300364,
"children": {
"steps_from_proto": {
"total": 0.05163909999585847,
"count": 52,
"is_parallel": true,
"self": 0.009670500012492145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04196859998336633,
"count": 208,
"is_parallel": true,
"self": 0.04196859998336633
}
}
},
"UnityEnvironment.step": {
"total": 4121.910131799911,
"count": 348114,
"is_parallel": true,
"self": 215.7407381998064,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 156.97218570024103,
"count": 348114,
"is_parallel": true,
"self": 156.97218570024103
},
"communicator.exchange": {
"total": 3079.25841819985,
"count": 348114,
"is_parallel": true,
"self": 3079.25841819985
},
"steps_from_proto": {
"total": 669.9387897000138,
"count": 696228,
"is_parallel": true,
"self": 128.10606009812727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 541.8327296018865,
"count": 2784912,
"is_parallel": true,
"self": 541.8327296018865
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8570.010735800031,
"count": 348114,
"self": 64.94944539986318,
"children": {
"process_trajectory": {
"total": 1374.2275195001607,
"count": 348114,
"self": 1372.68464780016,
"children": {
"RLTrainer._checkpoint": {
"total": 1.54287170000066,
"count": 10,
"self": 1.54287170000066
}
}
},
"_update_policy": {
"total": 7130.833770900007,
"count": 245,
"self": 726.3377767999818,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6404.495994100025,
"count": 7350,
"self": 6404.495994100025
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.7999994927085936e-06,
"count": 1,
"self": 2.7999994927085936e-06
},
"TrainerController._save_models": {
"total": 0.16140710000036051,
"count": 1,
"self": 0.008262199999080622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1531449000012799,
"count": 1,
"self": 0.1531449000012799
}
}
}
}
}
}
}