{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2730460166931152,
"min": 3.2722370624542236,
"max": 3.295734167098999,
"count": 13
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 72373.59375,
"min": 19247.623046875,
"max": 105463.4765625,
"count": 13
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 830.0,
"min": 727.5,
"max": 999.0,
"count": 13
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19920.0,
"min": 16748.0,
"max": 26100.0,
"count": 13
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.286574055704,
"min": 1199.9602811060863,
"max": 1204.286574055704,
"count": 12
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4817.146296222816,
"min": 2402.3965187876356,
"max": 9599.68224884869,
"count": 12
},
"SoccerTwos.Step.mean": {
"value": 129820.0,
"min": 9066.0,
"max": 129820.0,
"count": 13
},
"SoccerTwos.Step.sum": {
"value": 129820.0,
"min": 9066.0,
"max": 129820.0,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0008173122187145054,
"min": -0.00990950409322977,
"max": 0.0052213906310498714,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.009807746857404709,
"min": -0.12882354855537415,
"max": 0.06265668570995331,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0019189155427739024,
"min": -0.00903354398906231,
"max": 0.005207187030464411,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.02302698604762554,
"min": -0.11743606626987457,
"max": 0.06248624250292778,
"count": 13
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 13
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.061700006326039634,
"min": -0.3194153859065129,
"max": 0.20903334021568298,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.7404000759124756,
"min": -4.152400016784668,
"max": 2.508400082588196,
"count": 13
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.061700006326039634,
"min": -0.3194153859065129,
"max": 0.20903334021568298,
"count": 13
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.7404000759124756,
"min": -4.152400016784668,
"max": 2.508400082588196,
"count": 13
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012841316836420447,
"min": 0.012841316836420447,
"max": 0.017041035451984498,
"count": 6
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012841316836420447,
"min": 0.012841316836420447,
"max": 0.017041035451984498,
"count": 6
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0021382930882585544,
"min": 0.0016193612692101548,
"max": 0.003799449202294151,
"count": 6
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0021382930882585544,
"min": 0.0016193612692101548,
"max": 0.003799449202294151,
"count": 6
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0021427319734357297,
"min": 0.0016194361300828557,
"max": 0.003858683972309033,
"count": 6
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0021427319734357297,
"min": 0.0016194361300828557,
"max": 0.003858683972309033,
"count": 6
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 6
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 6
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 6
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 6
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 6
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 6
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689870004",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/giovannidispoto/miniforge3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id =SoccerTwos training --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1689870497"
},
"total": 493.635118291,
"count": 1,
"self": 0.005367124000088097,
"children": {
"run_training.setup": {
"total": 0.012915374999999951,
"count": 1,
"self": 0.012915374999999951
},
"TrainerController.start_learning": {
"total": 493.61683579199996,
"count": 1,
"self": 0.09755877800296275,
"children": {
"TrainerController._reset_env": {
"total": 3.325448333,
"count": 1,
"self": 3.325448333
},
"TrainerController.advance": {
"total": 489.970734097997,
"count": 8888,
"self": 0.0966509849946533,
"children": {
"env_step": {
"total": 396.18781395499997,
"count": 8888,
"self": 380.7007259920022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15.422013874997418,
"count": 8888,
"self": 0.5089644479970659,
"children": {
"TorchPolicy.evaluate": {
"total": 14.913049427000352,
"count": 17648,
"self": 14.913049427000352
}
}
},
"workers": {
"total": 0.06507408800034975,
"count": 8887,
"self": 0.0,
"children": {
"worker_root": {
"total": 486.1569761020012,
"count": 8887,
"is_parallel": true,
"self": 122.89936898400254,
"children": {
"steps_from_proto": {
"total": 0.0015613330000001646,
"count": 2,
"is_parallel": true,
"self": 0.000280333000000077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012810000000000876,
"count": 8,
"is_parallel": true,
"self": 0.0012810000000000876
}
}
},
"UnityEnvironment.step": {
"total": 363.25604578499866,
"count": 8887,
"is_parallel": true,
"self": 0.9835880289940064,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.796755179999298,
"count": 8887,
"is_parallel": true,
"self": 5.796755179999298
},
"communicator.exchange": {
"total": 345.07033209000235,
"count": 8887,
"is_parallel": true,
"self": 345.07033209000235
},
"steps_from_proto": {
"total": 11.405370486002987,
"count": 17774,
"is_parallel": true,
"self": 1.5637338320070846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.841636653995902,
"count": 71096,
"is_parallel": true,
"self": 9.841636653995902
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 93.68626915800243,
"count": 8887,
"self": 0.7808035860016105,
"children": {
"process_trajectory": {
"total": 13.667919948000808,
"count": 8887,
"self": 13.667919948000808
},
"_update_policy": {
"total": 79.237545624,
"count": 6,
"self": 10.890855251999994,
"children": {
"TorchPOCAOptimizer.update": {
"total": 68.34669037200001,
"count": 180,
"self": 68.34669037200001
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.7499999987412593e-05,
"count": 1,
"self": 2.7499999987412593e-05
},
"TrainerController._save_models": {
"total": 0.22306708299998945,
"count": 1,
"self": 0.0021550000000161162,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22091208299997334,
"count": 1,
"self": 0.22091208299997334
}
}
}
}
}
}
}