{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9140218496322632,
"min": 1.8841967582702637,
"max": 3.2789859771728516,
"count": 493
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35707.9921875,
"min": 27597.958984375,
"max": 104927.5546875,
"count": 493
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 69.95652173913044,
"min": 43.792792792792795,
"max": 999.0,
"count": 493
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19308.0,
"min": 2168.0,
"max": 28124.0,
"count": 493
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 2052.649160655305,
"min": 1634.0065845004867,
"max": 2081.739670231482,
"count": 492
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 283265.58417043206,
"min": 3274.644846055666,
"max": 427167.9675037724,
"count": 492
},
"SoccerTwos.Step.mean": {
"value": 9999998.0,
"min": 5079036.0,
"max": 9999998.0,
"count": 493
},
"SoccerTwos.Step.sum": {
"value": 9999998.0,
"min": 5079036.0,
"max": 9999998.0,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.025618843734264374,
"min": -0.12070416659116745,
"max": 0.20378956198692322,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.535400390625,
"min": -21.968158721923828,
"max": 26.109474182128906,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03210558369755745,
"min": -0.1224394366145134,
"max": 0.20375539362430573,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.430570602416992,
"min": -22.283977508544922,
"max": 24.444137573242188,
"count": 493
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 493
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1318231881528661,
"min": -0.5799142860230946,
"max": 0.6240606050599705,
"count": 493
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -18.19159996509552,
"min": -65.19880020618439,
"max": 56.38820028305054,
"count": 493
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1318231881528661,
"min": -0.5799142860230946,
"max": 0.6240606050599705,
"count": 493
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -18.19159996509552,
"min": -65.19880020618439,
"max": 56.38820028305054,
"count": 493
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 493
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 493
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016820453786446404,
"min": 0.014308929060765271,
"max": 0.02052879839708718,
"count": 119
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016820453786446404,
"min": 0.014308929060765271,
"max": 0.02052879839708718,
"count": 119
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09343656189739705,
"min": 0.0028034542279783635,
"max": 0.10124819080034891,
"count": 119
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09343656189739705,
"min": 0.0028034542279783635,
"max": 0.10124819080034891,
"count": 119
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09621581559379895,
"min": 0.0031789839442353694,
"max": 0.10505046372612317,
"count": 119
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09621581559379895,
"min": 0.0031789839442353694,
"max": 0.10505046372612317,
"count": 119
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00029500000000000007,
"min": 0.00029500000000000007,
"max": 0.00029500000000000007,
"count": 119
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00029500000000000007,
"min": 0.00029500000000000007,
"max": 0.00029500000000000007,
"count": 119
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999993,
"min": 0.19999999999999993,
"max": 0.19999999999999993,
"count": 119
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999993,
"min": 0.19999999999999993,
"max": 0.19999999999999993,
"count": 119
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 119
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 119
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689504139",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\84908\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos2 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1689546902"
},
"total": 42762.9120158,
"count": 1,
"self": 1.1350629000007757,
"children": {
"run_training.setup": {
"total": 0.15489059999999988,
"count": 1,
"self": 0.15489059999999988
},
"TrainerController.start_learning": {
"total": 42761.6220623,
"count": 1,
"self": 14.43110979921039,
"children": {
"TrainerController._reset_env": {
"total": 6.812163500008067,
"count": 21,
"self": 6.812163500008067
},
"TrainerController.advance": {
"total": 42740.09502890078,
"count": 335812,
"self": 13.688751599562238,
"children": {
"env_step": {
"total": 11367.246411900505,
"count": 335812,
"self": 8355.052144300767,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3002.7884803989355,
"count": 335812,
"self": 97.30393870023727,
"children": {
"TorchPolicy.evaluate": {
"total": 2905.484541698698,
"count": 618688,
"self": 2905.484541698698
}
}
},
"workers": {
"total": 9.405787200801747,
"count": 335812,
"self": 0.0,
"children": {
"worker_root": {
"total": 42735.313393801036,
"count": 335812,
"is_parallel": true,
"self": 36066.35230810146,
"children": {
"steps_from_proto": {
"total": 0.07878310001497724,
"count": 42,
"is_parallel": true,
"self": 0.014979500020485759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06380359999449148,
"count": 168,
"is_parallel": true,
"self": 0.06380359999449148
}
}
},
"UnityEnvironment.step": {
"total": 6668.882302599564,
"count": 335812,
"is_parallel": true,
"self": 415.1706843983875,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 341.1210921979196,
"count": 335812,
"is_parallel": true,
"self": 341.1210921979196
},
"communicator.exchange": {
"total": 4620.200526800538,
"count": 335812,
"is_parallel": true,
"self": 4620.200526800538
},
"steps_from_proto": {
"total": 1292.389999202719,
"count": 671624,
"is_parallel": true,
"self": 248.6019459020622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1043.7880533006569,
"count": 2686496,
"is_parallel": true,
"self": 1043.7880533006569
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 31359.159865400714,
"count": 335812,
"self": 101.92826570027319,
"children": {
"process_trajectory": {
"total": 4188.981500100416,
"count": 335812,
"self": 4185.253818100407,
"children": {
"RLTrainer._checkpoint": {
"total": 3.7276820000088264,
"count": 10,
"self": 3.7276820000088264
}
}
},
"_update_policy": {
"total": 27068.250099600024,
"count": 119,
"self": 1198.192632999806,
"children": {
"TorchPOCAOptimizer.update": {
"total": 25870.057466600218,
"count": 7140,
"self": 25870.057466600218
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0000006770715117e-06,
"count": 1,
"self": 2.0000006770715117e-06
},
"TrainerController._save_models": {
"total": 0.2837580999985221,
"count": 1,
"self": 0.00894529999641236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2748128000021097,
"count": 1,
"self": 0.2748128000021097
}
}
}
}
}
}
}