{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9216433763504028,
"min": 1.920216679573059,
"max": 3.295745849609375,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37694.95703125,
"min": 19058.66015625,
"max": 113299.515625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 55.37931034482759,
"min": 43.267857142857146,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19272.0,
"min": 16208.0,
"max": 23732.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1600.5512314996797,
"min": 1200.5430052443305,
"max": 1638.9960393886781,
"count": 494
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 278495.9142809443,
"min": 2405.555724286956,
"max": 361284.19197027653,
"count": 494
},
"SoccerTwos.Step.mean": {
"value": 4999994.0,
"min": 9834.0,
"max": 4999994.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999994.0,
"min": 9834.0,
"max": 4999994.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05139904096722603,
"min": -0.09471949189901352,
"max": 0.17396052181720734,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.994832038879395,
"min": -15.416780471801758,
"max": 24.04250717163086,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05457119643688202,
"min": -0.09382709115743637,
"max": 0.17802540957927704,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -9.549959182739258,
"min": -15.489582061767578,
"max": 24.71759796142578,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.19206628595079694,
"min": -0.5523200035095215,
"max": 0.477729034039282,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -33.611600041389465,
"min": -47.1439995765686,
"max": 54.32780051231384,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.19206628595079694,
"min": -0.5523200035095215,
"max": 0.477729034039282,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -33.611600041389465,
"min": -47.1439995765686,
"max": 54.32780051231384,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016444309207145125,
"min": 0.011042615599095976,
"max": 0.02421950464583157,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016444309207145125,
"min": 0.011042615599095976,
"max": 0.02421950464583157,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10301259011030198,
"min": 6.078811832897675e-05,
"max": 0.11490296199917793,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10301259011030198,
"min": 6.078811832897675e-05,
"max": 0.11490296199917793,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10457213396827379,
"min": 6.38940089023284e-05,
"max": 0.11693824951847394,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10457213396827379,
"min": 6.38940089023284e-05,
"max": 0.11693824951847394,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703974276",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\roman\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1703986074"
},
"total": 11797.066388199979,
"count": 1,
"self": 0.336131599964574,
"children": {
"run_training.setup": {
"total": 0.24783700000261888,
"count": 1,
"self": 0.24783700000261888
},
"TrainerController.start_learning": {
"total": 11796.482419600012,
"count": 1,
"self": 7.5017640818841755,
"children": {
"TrainerController._reset_env": {
"total": 14.045037999982014,
"count": 25,
"self": 14.045037999982014
},
"TrainerController.advance": {
"total": 11774.808666818135,
"count": 339394,
"self": 8.221190334588755,
"children": {
"env_step": {
"total": 6060.28108600463,
"count": 339394,
"self": 4909.338518785953,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1146.3086297913687,
"count": 339394,
"self": 42.76119652070338,
"children": {
"TorchPolicy.evaluate": {
"total": 1103.5474332706654,
"count": 632062,
"self": 1103.5474332706654
}
}
},
"workers": {
"total": 4.63393742730841,
"count": 339394,
"self": 0.0,
"children": {
"worker_root": {
"total": 11773.352925502986,
"count": 339394,
"is_parallel": true,
"self": 7774.115271407994,
"children": {
"steps_from_proto": {
"total": 0.03963030007435009,
"count": 50,
"is_parallel": true,
"self": 0.008485200174618512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.031145099899731576,
"count": 200,
"is_parallel": true,
"self": 0.031145099899731576
}
}
},
"UnityEnvironment.step": {
"total": 3999.1980237949174,
"count": 339394,
"is_parallel": true,
"self": 154.7719755655271,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 119.54345778923016,
"count": 339394,
"is_parallel": true,
"self": 119.54345778923016
},
"communicator.exchange": {
"total": 3238.660531307629,
"count": 339394,
"is_parallel": true,
"self": 3238.660531307629
},
"steps_from_proto": {
"total": 486.2220591325313,
"count": 678788,
"is_parallel": true,
"self": 109.23981188074686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 376.98224725178443,
"count": 2715152,
"is_parallel": true,
"self": 376.98224725178443
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5706.306390478916,
"count": 339394,
"self": 60.05203516012989,
"children": {
"process_trajectory": {
"total": 1075.505167618452,
"count": 339394,
"self": 1074.0611024184036,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4440652000484988,
"count": 10,
"self": 1.4440652000484988
}
}
},
"_update_policy": {
"total": 4570.749187700334,
"count": 240,
"self": 680.3354643014609,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3890.413723398873,
"count": 7200,
"self": 3890.413723398873
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.00005330145359e-07,
"count": 1,
"self": 7.00005330145359e-07
},
"TrainerController._save_models": {
"total": 0.12695000000530854,
"count": 1,
"self": 0.00397080002585426,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12297919997945428,
"count": 1,
"self": 0.12297919997945428
}
}
}
}
}
}
}