{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1884450912475586,
"min": 3.1543173789978027,
"max": 3.295722007751465,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44383.15625,
"min": 5364.7880859375,
"max": 105463.1015625,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 918.4,
"min": 393.54545454545456,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18368.0,
"min": 3996.0,
"max": 28172.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1208.8509585019856,
"min": 1194.0402364530805,
"max": 1212.045870328748,
"count": 81
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2417.701917003971,
"min": 2388.080472906161,
"max": 14481.386916658726,
"count": 81
},
"SoccerTwos.Step.mean": {
"value": 999252.0,
"min": 9062.0,
"max": 999252.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999252.0,
"min": 9062.0,
"max": 999252.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.010445845313370228,
"min": -0.07581650465726852,
"max": 0.019616320729255676,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.11490429937839508,
"min": -0.9097980856895447,
"max": 0.23539584875106812,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.010502335615456104,
"min": -0.07582205533981323,
"max": 0.019769296050071716,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.11552569270133972,
"min": -0.9098646640777588,
"max": 0.2372315526008606,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07312727516347711,
"min": -0.41153333336114883,
"max": 0.36257500760257244,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.8044000267982483,
"min": -6.468800067901611,
"max": 5.801200121641159,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07312727516347711,
"min": -0.41153333336114883,
"max": 0.36257500760257244,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.8044000267982483,
"min": -6.468800067901611,
"max": 5.801200121641159,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015264107022327758,
"min": 0.012924301449675112,
"max": 0.023621039185672998,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015264107022327758,
"min": 0.012924301449675112,
"max": 0.023621039185672998,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0012293039365128303,
"min": 4.021294789708918e-05,
"max": 0.005852521955966949,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0012293039365128303,
"min": 4.021294789708918e-05,
"max": 0.005852521955966949,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0012328454526141287,
"min": 3.709641429547143e-05,
"max": 0.006051869333411257,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0012328454526141287,
"min": 3.709641429547143e-05,
"max": 0.006051869333411257,
"count": 46
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684797705",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Alfin\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1684800727"
},
"total": 3021.412926,
"count": 1,
"self": 0.17689329999984693,
"children": {
"run_training.setup": {
"total": 0.052013899999999946,
"count": 1,
"self": 0.052013899999999946
},
"TrainerController.start_learning": {
"total": 3021.1840188,
"count": 1,
"self": 2.0456582000333583,
"children": {
"TrainerController._reset_env": {
"total": 3.076991899999862,
"count": 10,
"self": 3.076991899999862
},
"TrainerController.advance": {
"total": 3015.9242281999673,
"count": 67283,
"self": 2.1239989999580757,
"children": {
"env_step": {
"total": 1503.0990195000438,
"count": 67283,
"self": 1055.9908713999403,
"children": {
"SubprocessEnvManager._take_step": {
"total": 445.81550570008653,
"count": 67283,
"self": 13.244350500097084,
"children": {
"TorchPolicy.evaluate": {
"total": 432.57115519998945,
"count": 133582,
"self": 432.57115519998945
}
}
},
"workers": {
"total": 1.2926424000169146,
"count": 67283,
"self": 0.0,
"children": {
"worker_root": {
"total": 3014.9546084999756,
"count": 67283,
"is_parallel": true,
"self": 2221.3383639999324,
"children": {
"steps_from_proto": {
"total": 0.03136619999971435,
"count": 20,
"is_parallel": true,
"self": 0.004794099998373547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.026572100001340804,
"count": 80,
"is_parallel": true,
"self": 0.026572100001340804
}
}
},
"UnityEnvironment.step": {
"total": 793.5848783000436,
"count": 67283,
"is_parallel": true,
"self": 44.424546500087786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 52.24842179995653,
"count": 67283,
"is_parallel": true,
"self": 52.24842179995653
},
"communicator.exchange": {
"total": 542.3120904000126,
"count": 67283,
"is_parallel": true,
"self": 542.3120904000126
},
"steps_from_proto": {
"total": 154.5998195999866,
"count": 134566,
"is_parallel": true,
"self": 34.44958509982793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 120.15023450015867,
"count": 538264,
"is_parallel": true,
"self": 120.15023450015867
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1510.7012096999651,
"count": 67283,
"self": 12.211857299984786,
"children": {
"process_trajectory": {
"total": 241.55741609998103,
"count": 67283,
"self": 240.38777189998126,
"children": {
"RLTrainer._checkpoint": {
"total": 1.169644199999766,
"count": 2,
"self": 1.169644199999766
}
}
},
"_update_policy": {
"total": 1256.9319362999993,
"count": 46,
"self": 152.37882540000396,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1104.5531108999953,
"count": 1389,
"self": 1104.5531108999953
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.99999883788405e-07,
"count": 1,
"self": 9.99999883788405e-07
},
"TrainerController._save_models": {
"total": 0.13713949999964825,
"count": 1,
"self": 0.020320199999332544,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1168193000003157,
"count": 1,
"self": 0.1168193000003157
}
}
}
}
}
}
}