poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1654348373413086,
"min": 3.143012523651123,
"max": 3.1917285919189453,
"count": 47
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 69994.09375,
"min": 12469.833984375,
"max": 101995.203125,
"count": 47
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 887.3333333333334,
"min": 457.125,
"max": 999.0,
"count": 47
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21296.0,
"min": 7992.0,
"max": 31968.0,
"count": 47
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1209.033305119525,
"min": 1205.8941510006202,
"max": 1209.660612221576,
"count": 31
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4836.1332204781,
"min": 2411.7883020012405,
"max": 7257.9636733294565,
"count": 31
},
"SoccerTwos.Step.mean": {
"value": 4999800.0,
"min": 4539652.0,
"max": 4999800.0,
"count": 47
},
"SoccerTwos.Step.sum": {
"value": 4999800.0,
"min": 4539652.0,
"max": 4999800.0,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.006127266213297844,
"min": -0.01750943809747696,
"max": 0.004200804512947798,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.07352719455957413,
"min": -0.22762270271778107,
"max": 0.04200804606080055,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.005806541070342064,
"min": -0.017375219613313675,
"max": 0.0042443894781172276,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.06967849284410477,
"min": -0.22587786614894867,
"max": 0.0424438938498497,
"count": 47
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 47
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11169999837875366,
"min": -0.4166666666666667,
"max": 0.2304285764694214,
"count": 47
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.340399980545044,
"min": -5.0,
"max": 3.2260000705718994,
"count": 47
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11169999837875366,
"min": -0.4166666666666667,
"max": 0.2304285764694214,
"count": 47
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.340399980545044,
"min": -5.0,
"max": 3.2260000705718994,
"count": 47
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 47
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 47
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014754182616403948,
"min": 0.010258695079634587,
"max": 0.023413337239374717,
"count": 20
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014754182616403948,
"min": 0.010258695079634587,
"max": 0.023413337239374717,
"count": 20
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0016654776758514346,
"min": 5.434628244908406e-07,
"max": 0.003290623868815601,
"count": 20
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0016654776758514346,
"min": 5.434628244908406e-07,
"max": 0.003290623868815601,
"count": 20
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.001665588014293462,
"min": 5.792501596602051e-07,
"max": 0.0033041236456483603,
"count": 20
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.001665588014293462,
"min": 5.792501596602051e-07,
"max": 0.0033041236456483603,
"count": 20
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 20
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 20
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 20
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 20
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 20
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690767013",
"python_version": "3.10.10 | packaged by conda-forge | (main, Mar 24 2023, 20:08:06) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env=/content/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690768169"
},
"total": 1155.8800393410002,
"count": 1,
"self": 0.850588596000307,
"children": {
"run_training.setup": {
"total": 0.020737078000820475,
"count": 1,
"self": 0.020737078000820475
},
"TrainerController.start_learning": {
"total": 1155.008713666999,
"count": 1,
"self": 0.8899918940569478,
"children": {
"TrainerController._reset_env": {
"total": 4.24025274899941,
"count": 4,
"self": 4.24025274899941
},
"TrainerController.advance": {
"total": 1149.478719135941,
"count": 30839,
"self": 0.909951285319039,
"children": {
"env_step": {
"total": 940.7512788088661,
"count": 30839,
"self": 746.9692555987604,
"children": {
"SubprocessEnvManager._take_step": {
"total": 193.28846059587522,
"count": 30839,
"self": 5.498512304882752,
"children": {
"TorchPolicy.evaluate": {
"total": 187.78994829099247,
"count": 61274,
"self": 187.78994829099247
}
}
},
"workers": {
"total": 0.49356261423054093,
"count": 30839,
"self": 0.0,
"children": {
"worker_root": {
"total": 1151.5397765659345,
"count": 30839,
"is_parallel": true,
"self": 520.3601140870705,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027938249986618757,
"count": 2,
"is_parallel": true,
"self": 0.0007248569945659256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00206896800409595,
"count": 8,
"is_parallel": true,
"self": 0.00206896800409595
}
}
},
"UnityEnvironment.step": {
"total": 0.039516434000688605,
"count": 1,
"is_parallel": true,
"self": 0.001153601000623894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008682339994265931,
"count": 1,
"is_parallel": true,
"self": 0.0008682339994265931
},
"communicator.exchange": {
"total": 0.03387377500075672,
"count": 1,
"is_parallel": true,
"self": 0.03387377500075672
},
"steps_from_proto": {
"total": 0.003620823999881395,
"count": 2,
"is_parallel": true,
"self": 0.0006836849988758331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002937139001005562,
"count": 8,
"is_parallel": true,
"self": 0.002937139001005562
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.006089714997870033,
"count": 6,
"is_parallel": true,
"self": 0.0011341709996486316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004955543998221401,
"count": 24,
"is_parallel": true,
"self": 0.004955543998221401
}
}
},
"UnityEnvironment.step": {
"total": 631.1735727638661,
"count": 30838,
"is_parallel": true,
"self": 35.18660499447469,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.12658273210218,
"count": 30838,
"is_parallel": true,
"self": 22.12658273210218
},
"communicator.exchange": {
"total": 457.5699802731524,
"count": 30838,
"is_parallel": true,
"self": 457.5699802731524
},
"steps_from_proto": {
"total": 116.29040476413684,
"count": 61676,
"is_parallel": true,
"self": 19.048101971266078,
"children": {
"_process_rank_one_or_two_observation": {
"total": 97.24230279287076,
"count": 246704,
"is_parallel": true,
"self": 97.24230279287076
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 207.81748904175583,
"count": 30839,
"self": 7.11007162349415,
"children": {
"process_trajectory": {
"total": 51.426410633261185,
"count": 30839,
"self": 51.00422205026189,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42218858299929707,
"count": 1,
"self": 0.42218858299929707
}
}
},
"_update_policy": {
"total": 149.2810067850005,
"count": 20,
"self": 97.41269202499825,
"children": {
"TorchPOCAOptimizer.update": {
"total": 51.86831476000225,
"count": 627,
"self": 51.86831476000225
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5290006558643654e-06,
"count": 1,
"self": 1.5290006558643654e-06
},
"TrainerController._save_models": {
"total": 0.39974835900102335,
"count": 1,
"self": 0.01136390900137485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3883844499996485,
"count": 1,
"self": 0.3883844499996485
}
}
}
}
}
}
}