{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2777881622314453,
"min": 3.277698040008545,
"max": 3.2958109378814697,
"count": 10
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 84435.8203125,
"min": 27895.205078125,
"max": 105465.953125,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 564.4444444444445,
"min": 564.4444444444445,
"max": 999.0,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20320.0,
"min": 11988.0,
"max": 28036.0,
"count": 10
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1200.1569488124733,
"min": 1199.3839273809324,
"max": 1200.1569488124733,
"count": 8
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 12001.569488124733,
"min": 2400.022827873145,
"max": 12001.569488124733,
"count": 8
},
"SoccerTwos.Step.mean": {
"value": 99730.0,
"min": 9034.0,
"max": 99730.0,
"count": 10
},
"SoccerTwos.Step.sum": {
"value": 99730.0,
"min": 9034.0,
"max": 99730.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.04222764074802399,
"min": -0.0968908816576004,
"max": -0.04222764074802399,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.7600975632667542,
"min": -1.0727790594100952,
"max": -0.5923495888710022,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04232519119977951,
"min": -0.09689081460237503,
"max": -0.04232519119977951,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.7618534564971924,
"min": -1.0657989978790283,
"max": -0.5932878851890564,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.24262222978803846,
"min": -0.2301230774475978,
"max": 0.2968571441514151,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.367200136184692,
"min": -2.9916000068187714,
"max": 4.367200136184692,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.24262222978803846,
"min": -0.2301230774475978,
"max": 0.2968571441514151,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.367200136184692,
"min": -2.9916000068187714,
"max": 4.367200136184692,
"count": 10
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018879526696400716,
"min": 0.013216279258206972,
"max": 0.018879526696400716,
"count": 4
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018879526696400716,
"min": 0.013216279258206972,
"max": 0.018879526696400716,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0011499579239170998,
"min": 0.0011499579239170998,
"max": 0.0015964658145094291,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0011499579239170998,
"min": 0.0011499579239170998,
"max": 0.0015964658145094291,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0011461617114643255,
"min": 0.0011461617114643255,
"max": 0.001617215361329727,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0011461617114643255,
"min": 0.0011461617114643255,
"max": 0.001617215361329727,
"count": 4
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 4.7166084277999986e-05,
"min": 4.7166084277999986e-05,
"max": 0.0002341140219619999,
"count": 4
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 4.7166084277999986e-05,
"min": 4.7166084277999986e-05,
"max": 0.0002341140219619999,
"count": 4
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.11572199999999998,
"min": 0.11572199999999998,
"max": 0.17803799999999997,
"count": 4
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.11572199999999998,
"min": 0.11572199999999998,
"max": 0.17803799999999997,
"count": 4
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0007945278000000002,
"min": 0.0007945278000000002,
"max": 0.0039040961999999998,
"count": 4
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0007945278000000002,
"min": 0.0007945278000000002,
"max": 0.0039040961999999998,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679918907",
"python_version": "3.9.10 (tags/v3.9.10:f2f3f53, Jan 17 2022, 15:14:21) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\Melon\\PycharmProjects\\huggingface\\deep-rl-course\\venv\\Scripts\\mlagents-learn .\\SoccerTwos.yaml --env=.\\training-envs-executables\\SoccerTwos\\ --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1679919060"
},
"total": 152.71846240000002,
"count": 1,
"self": 0.4438656000000094,
"children": {
"run_training.setup": {
"total": 0.0783541000000001,
"count": 1,
"self": 0.0783541000000001
},
"TrainerController.start_learning": {
"total": 152.1962427,
"count": 1,
"self": 0.09687869999950749,
"children": {
"TrainerController._reset_env": {
"total": 3.0567960000000003,
"count": 1,
"self": 3.0567960000000003
},
"TrainerController.advance": {
"total": 148.9081946000005,
"count": 6839,
"self": 0.09801829999963729,
"children": {
"env_step": {
"total": 70.89547440000075,
"count": 6839,
"self": 53.72903609999963,
"children": {
"SubprocessEnvManager._take_step": {
"total": 17.10549290000064,
"count": 6839,
"self": 0.5554405000015166,
"children": {
"TorchPolicy.evaluate": {
"total": 16.550052399999124,
"count": 13610,
"self": 16.550052399999124
}
}
},
"workers": {
"total": 0.0609454000004801,
"count": 6839,
"self": 0.0,
"children": {
"worker_root": {
"total": 148.57212640000017,
"count": 6839,
"is_parallel": true,
"self": 104.62892350000091,
"children": {
"steps_from_proto": {
"total": 0.0011726000000003012,
"count": 2,
"is_parallel": true,
"self": 0.00022330000000003736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009493000000002638,
"count": 8,
"is_parallel": true,
"self": 0.0009493000000002638
}
}
},
"UnityEnvironment.step": {
"total": 43.94203029999927,
"count": 6839,
"is_parallel": true,
"self": 2.271892799998021,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.52210370000032,
"count": 6839,
"is_parallel": true,
"self": 1.52210370000032
},
"communicator.exchange": {
"total": 33.784643900000326,
"count": 6839,
"is_parallel": true,
"self": 33.784643900000326
},
"steps_from_proto": {
"total": 6.363389900000595,
"count": 13678,
"is_parallel": true,
"self": 1.2525943000015936,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.110795599999001,
"count": 54712,
"is_parallel": true,
"self": 5.110795599999001
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 77.91470190000014,
"count": 6839,
"self": 0.8459732000001594,
"children": {
"process_trajectory": {
"total": 13.26886649999997,
"count": 6839,
"self": 13.26886649999997
},
"_update_policy": {
"total": 63.79986220000001,
"count": 4,
"self": 6.665142300000127,
"children": {
"TorchPOCAOptimizer.update": {
"total": 57.13471989999988,
"count": 120,
"self": 57.13471989999988
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.999999987376214e-07,
"count": 1,
"self": 4.999999987376214e-07
},
"TrainerController._save_models": {
"total": 0.13437289999998825,
"count": 1,
"self": 0.00656009999997309,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12781280000001516,
"count": 1,
"self": 0.12781280000001516
}
}
}
}
}
}
}