poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.1925652027130127,
"min": 2.0357096195220947,
"max": 3.295631170272827,
"count": 800
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 42798.875,
"min": 29065.486328125,
"max": 110619.3515625,
"count": 800
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 504.55555555555554,
"max": 999.0,
"count": 800
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 12200.0,
"max": 28524.0,
"count": 800
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1199.9334636218261,
"min": 1196.8453914730167,
"max": 1204.9000747049904,
"count": 65
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2399.8669272436523,
"min": 2394.7921322129832,
"max": 9632.44062650026,
"count": 65
},
"SoccerTwos.Step.mean": {
"value": 7999944.0,
"min": 9278.0,
"max": 7999944.0,
"count": 800
},
"SoccerTwos.Step.sum": {
"value": 7999944.0,
"min": 9278.0,
"max": 7999944.0,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.012500254437327385,
"min": -0.06711050122976303,
"max": 0.018256569281220436,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.12500254809856415,
"min": -0.939547061920166,
"max": 0.18256568908691406,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.012361116707324982,
"min": -0.06710167974233627,
"max": 0.013610780239105225,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.12361116707324982,
"min": -0.9394006729125977,
"max": 0.13610780239105225,
"count": 800
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 800
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4185714285288538,
"max": 0.16814285516738892,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -5.8599999994039536,
"max": 2.353999972343445,
"count": 800
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4185714285288538,
"max": 0.16814285516738892,
"count": 800
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -5.8599999994039536,
"max": 2.353999972343445,
"count": 800
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 800
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 800
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013426195655483752,
"min": 0.010731164743386519,
"max": 0.022484100555690625,
"count": 365
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013426195655483752,
"min": 0.010731164743386519,
"max": 0.022484100555690625,
"count": 365
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 2.2728424664819612e-05,
"min": 3.622488022510713e-10,
"max": 0.04969244154425117,
"count": 365
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 2.2728424664819612e-05,
"min": 3.622488022510713e-10,
"max": 0.04969244154425117,
"count": 365
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 3.371514682536751e-05,
"min": 4.852258724448906e-10,
"max": 0.053099278115799585,
"count": 365
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 3.371514682536751e-05,
"min": 4.852258724448906e-10,
"max": 0.053099278115799585,
"count": 365
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 365
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 365
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 365
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 365
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 365
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 365
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683147653",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\kingk\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1683159499"
},
"total": 11846.9075292,
"count": 1,
"self": 4.130778700000519,
"children": {
"run_training.setup": {
"total": 0.06870520000000013,
"count": 1,
"self": 0.06870520000000013
},
"TrainerController.start_learning": {
"total": 11842.7080453,
"count": 1,
"self": 8.680754000451998,
"children": {
"TrainerController._reset_env": {
"total": 5.516737000000538,
"count": 40,
"self": 5.516737000000538
},
"TrainerController.advance": {
"total": 11828.357280399548,
"count": 520568,
"self": 9.474384999253743,
"children": {
"env_step": {
"total": 9001.134529999916,
"count": 520568,
"self": 5258.721903999822,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3736.4428117002003,
"count": 520568,
"self": 59.703648000168414,
"children": {
"TorchPolicy.evaluate": {
"total": 3676.739163700032,
"count": 1034124,
"self": 3676.739163700032
}
}
},
"workers": {
"total": 5.969814299893104,
"count": 520567,
"self": 0.0,
"children": {
"worker_root": {
"total": 11826.65870089953,
"count": 520567,
"is_parallel": true,
"self": 7688.050879899954,
"children": {
"steps_from_proto": {
"total": 0.0885296000048661,
"count": 80,
"is_parallel": true,
"self": 0.0183252000139964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07020439999086969,
"count": 320,
"is_parallel": true,
"self": 0.07020439999086969
}
}
},
"UnityEnvironment.step": {
"total": 4138.519291399571,
"count": 520567,
"is_parallel": true,
"self": 221.24659419827094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 314.96789329995363,
"count": 520567,
"is_parallel": true,
"self": 314.96789329995363
},
"communicator.exchange": {
"total": 2890.5178727002058,
"count": 520567,
"is_parallel": true,
"self": 2890.5178727002058
},
"steps_from_proto": {
"total": 711.7869312011401,
"count": 1041134,
"is_parallel": true,
"self": 149.8315113017693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 561.9554198993708,
"count": 4164536,
"is_parallel": true,
"self": 561.9554198993708
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2817.748365400379,
"count": 520567,
"self": 66.07151470030749,
"children": {
"process_trajectory": {
"total": 947.8506300000706,
"count": 520567,
"self": 944.8551793000722,
"children": {
"RLTrainer._checkpoint": {
"total": 2.9954506999984005,
"count": 16,
"self": 2.9954506999984005
}
}
},
"_update_policy": {
"total": 1803.8262207000012,
"count": 366,
"self": 1129.6175799999464,
"children": {
"TorchPOCAOptimizer.update": {
"total": 674.2086407000547,
"count": 10980,
"self": 674.2086407000547
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0999992809956893e-06,
"count": 1,
"self": 1.0999992809956893e-06
},
"TrainerController._save_models": {
"total": 0.15327280000019528,
"count": 1,
"self": 0.012785599999915576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1404872000002797,
"count": 1,
"self": 0.1404872000002797
}
}
}
}
}
}
}