poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4617689847946167,
"min": 1.332874059677124,
"max": 3.2957069873809814,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27832.08203125,
"min": 21882.611328125,
"max": 118871.578125,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 69.49295774647888,
"min": 40.5,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19736.0,
"min": 3996.0,
"max": 31004.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1650.5768481760022,
"min": 1192.1416974634824,
"max": 1707.1835606316117,
"count": 4997
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 234381.9124409923,
"min": 2384.283394926965,
"max": 390961.1526288972,
"count": 4997
},
"SoccerTwos.Step.mean": {
"value": 49999951.0,
"min": 9786.0,
"max": 49999951.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999951.0,
"min": 9786.0,
"max": 49999951.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.012258904054760933,
"min": -0.148080512881279,
"max": 0.16084472835063934,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.7407643795013428,
"min": -28.43145751953125,
"max": 34.14482879638672,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.013871767558157444,
"min": -0.14815695583820343,
"max": 0.15961363911628723,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.9697909355163574,
"min": -28.446134567260742,
"max": 33.1370964050293,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.08832675974133988,
"min": -0.6386888888147142,
"max": 0.5193379254176699,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 12.542399883270264,
"min": -80.83239984512329,
"max": 63.52080023288727,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.08832675974133988,
"min": -0.6386888888147142,
"max": 0.5193379254176699,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 12.542399883270264,
"min": -80.83239984512329,
"max": 63.52080023288727,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017822199609630236,
"min": 0.009487322757316482,
"max": 0.02624323091780146,
"count": 2424
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017822199609630236,
"min": 0.009487322757316482,
"max": 0.02624323091780146,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09293877705931664,
"min": 0.0005154974768326307,
"max": 0.126951323201259,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09293877705931664,
"min": 0.0005154974768326307,
"max": 0.126951323201259,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09431866630911827,
"min": 0.000516512922088926,
"max": 0.12884828920165697,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09431866630911827,
"min": 0.000516512922088926,
"max": 0.12884828920165697,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2424
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2424
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695908809",
"python_version": "3.9.18 (main, Sep 11 2023, 13:41:44) \n[GCC 11.2.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696005305"
},
"total": 96496.132617792,
"count": 1,
"self": 0.3784213839971926,
"children": {
"run_training.setup": {
"total": 0.007320440003240947,
"count": 1,
"self": 0.007320440003240947
},
"TrainerController.start_learning": {
"total": 96495.746875968,
"count": 1,
"self": 57.77581728520454,
"children": {
"TrainerController._reset_env": {
"total": 3.8935154349201184,
"count": 250,
"self": 3.8935154349201184
},
"TrainerController.advance": {
"total": 96433.8697349739,
"count": 3447379,
"self": 63.16692418958701,
"children": {
"env_step": {
"total": 46806.88848410416,
"count": 3447379,
"self": 41522.558002341044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5246.375490510603,
"count": 3447379,
"self": 286.0012961551147,
"children": {
"TorchPolicy.evaluate": {
"total": 4960.374194355489,
"count": 6281010,
"self": 4960.374194355489
}
}
},
"workers": {
"total": 37.954991252514446,
"count": 3447379,
"self": 0.0,
"children": {
"worker_root": {
"total": 96363.02297746808,
"count": 3447379,
"is_parallel": true,
"self": 60541.84459547478,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014306179982668255,
"count": 2,
"is_parallel": true,
"self": 0.00039618698792764917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010344310103391763,
"count": 8,
"is_parallel": true,
"self": 0.0010344310103391763
}
}
},
"UnityEnvironment.step": {
"total": 0.018666894000489265,
"count": 1,
"is_parallel": true,
"self": 0.0003737270017154515,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002466320001985878,
"count": 1,
"is_parallel": true,
"self": 0.0002466320001985878
},
"communicator.exchange": {
"total": 0.016957683001237456,
"count": 1,
"is_parallel": true,
"self": 0.016957683001237456
},
"steps_from_proto": {
"total": 0.0010888519973377697,
"count": 2,
"is_parallel": true,
"self": 0.00029736200303887017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007914899942988995,
"count": 8,
"is_parallel": true,
"self": 0.0007914899942988995
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 35820.938156066215,
"count": 3447378,
"is_parallel": true,
"self": 1049.2069826879306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 618.7168775363389,
"count": 3447378,
"is_parallel": true,
"self": 618.7168775363389
},
"communicator.exchange": {
"total": 31117.198143229813,
"count": 3447378,
"is_parallel": true,
"self": 31117.198143229813
},
"steps_from_proto": {
"total": 3035.8161526121294,
"count": 6894756,
"is_parallel": true,
"self": 619.946729455336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2415.8694231567933,
"count": 27579024,
"is_parallel": true,
"self": 2415.8694231567933
}
}
}
}
},
"steps_from_proto": {
"total": 0.24022592708206503,
"count": 498,
"is_parallel": true,
"self": 0.0490387380195898,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.19118718906247523,
"count": 1992,
"is_parallel": true,
"self": 0.19118718906247523
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 49563.81432668014,
"count": 3447379,
"self": 463.47807261179696,
"children": {
"process_trajectory": {
"total": 5812.134094873258,
"count": 3447379,
"self": 5800.489614529342,
"children": {
"RLTrainer._checkpoint": {
"total": 11.644480343915347,
"count": 100,
"self": 11.644480343915347
}
}
},
"_update_policy": {
"total": 43288.20215919509,
"count": 2424,
"self": 4643.756068403927,
"children": {
"TorchPOCAOptimizer.update": {
"total": 38644.44609079116,
"count": 72732,
"self": 38644.44609079116
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.669862730428576e-07,
"count": 1,
"self": 5.669862730428576e-07
},
"TrainerController._save_models": {
"total": 0.20780770700366702,
"count": 1,
"self": 0.00687030800327193,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2009373990003951,
"count": 1,
"self": 0.2009373990003951
}
}
}
}
}
}
}