{
"name": "root",
"gauges": {
"SpaceMan.Policy.Entropy.mean": {
"value": 1.4396039247512817,
"min": 1.404689073562622,
"max": 1.449188232421875,
"count": 300
},
"SpaceMan.Policy.Entropy.sum": {
"value": 14423.3916015625,
"min": 13927.4931640625,
"max": 14607.111328125,
"count": 300
},
"SpaceMan.Step.mean": {
"value": 2999989.0,
"min": 9909.0,
"max": 2999989.0,
"count": 300
},
"SpaceMan.Step.sum": {
"value": 2999989.0,
"min": 9909.0,
"max": 2999989.0,
"count": 300
},
"SpaceMan.Policy.ExtrinsicValueEstimate.mean": {
"value": 5.108259677886963,
"min": -0.0456768199801445,
"max": 5.308773517608643,
"count": 300
},
"SpaceMan.Policy.ExtrinsicValueEstimate.sum": {
"value": 541.4755249023438,
"min": -4.933096408843994,
"max": 557.4212036132812,
"count": 300
},
"SpaceMan.Environment.EpisodeLength.mean": {
"value": 743.2142857142857,
"min": 239.0,
"max": 1272.4285714285713,
"count": 300
},
"SpaceMan.Environment.EpisodeLength.sum": {
"value": 10405.0,
"min": 8081.0,
"max": 12535.0,
"count": 300
},
"SpaceMan.Environment.CumulativeReward.mean": {
"value": 58.78126232440655,
"min": 10.89677467800322,
"max": 105.08675193786621,
"count": 300
},
"SpaceMan.Environment.CumulativeReward.sum": {
"value": 764.1564102172852,
"min": 457.66453647613525,
"max": 936.7664852142334,
"count": 300
},
"SpaceMan.Policy.ExtrinsicReward.mean": {
"value": 58.78126232440655,
"min": 10.89677467800322,
"max": 105.08675193786621,
"count": 300
},
"SpaceMan.Policy.ExtrinsicReward.sum": {
"value": 764.1564102172852,
"min": 457.66453647613525,
"max": 936.7664852142334,
"count": 300
},
"SpaceMan.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 300
},
"SpaceMan.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 300
},
"SpaceMan.Losses.PolicyLoss.mean": {
"value": 0.017731686183484273,
"min": 0.017370412978576496,
"max": 0.018258020761189982,
"count": 14
},
"SpaceMan.Losses.PolicyLoss.sum": {
"value": 0.017731686183484273,
"min": 0.017370412978576496,
"max": 0.018258020761189982,
"count": 14
},
"SpaceMan.Losses.ValueLoss.mean": {
"value": 1.5500787984132767,
"min": 0.6227473085820675,
"max": 1.6574717750549317,
"count": 14
},
"SpaceMan.Losses.ValueLoss.sum": {
"value": 1.5500787984132767,
"min": 0.6227473085820675,
"max": 1.6574717750549317,
"count": 14
},
"SpaceMan.Policy.LearningRate.mean": {
"value": 1.3200195599966665e-05,
"min": 1.3200195599966665e-05,
"max": 0.0002795162068279333,
"count": 14
},
"SpaceMan.Policy.LearningRate.sum": {
"value": 1.3200195599966665e-05,
"min": 1.3200195599966665e-05,
"max": 0.0002795162068279333,
"count": 14
},
"SpaceMan.Policy.Epsilon.mean": {
"value": 0.10440003333333331,
"min": 0.10440003333333331,
"max": 0.19317206666666664,
"count": 14
},
"SpaceMan.Policy.Epsilon.sum": {
"value": 0.10440003333333331,
"min": 0.10440003333333331,
"max": 0.19317206666666664,
"count": 14
},
"SpaceMan.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 14
},
"SpaceMan.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708233795",
"python_version": "3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\marko\\Documents\\UNITY\\ML-Agents\\.venv\\Scripts\\mlagents-learn config/ppo/SpaceManConfig.yaml --run-id=SpaceMan",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.23.4",
"end_time_seconds": "1708251695"
},
"total": 17899.673288199992,
"count": 1,
"self": 0.050844899960793555,
"children": {
"run_training.setup": {
"total": 0.094327200029511,
"count": 1,
"self": 0.094327200029511
},
"TrainerController.start_learning": {
"total": 17899.528116100002,
"count": 1,
"self": 48.87804145150585,
"children": {
"TrainerController._reset_env": {
"total": 20.616248000005726,
"count": 1,
"self": 20.616248000005726
},
"TrainerController.advance": {
"total": 17829.817165548448,
"count": 3007047,
"self": 43.271958130644634,
"children": {
"env_step": {
"total": 15947.844941709773,
"count": 3007047,
"self": 8307.44479442708,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7608.474235826696,
"count": 3007047,
"self": 144.90336746699177,
"children": {
"TorchPolicy.evaluate": {
"total": 7463.570868359704,
"count": 3000019,
"self": 7463.570868359704
}
}
},
"workers": {
"total": 31.925911455997266,
"count": 3007047,
"self": 0.0,
"children": {
"worker_root": {
"total": 17826.03124869382,
"count": 3007047,
"is_parallel": true,
"self": 11705.28407783201,
"children": {
"steps_from_proto": {
"total": 0.0030157999717630446,
"count": 1,
"is_parallel": true,
"self": 0.00010429997928440571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002911499992478639,
"count": 2,
"is_parallel": true,
"self": 0.002911499992478639
}
}
},
"UnityEnvironment.step": {
"total": 6120.744155061839,
"count": 3007047,
"is_parallel": true,
"self": 185.63163686619373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 148.9114509843639,
"count": 3007047,
"is_parallel": true,
"self": 148.9114509843639
},
"communicator.exchange": {
"total": 5352.722329935117,
"count": 3007047,
"is_parallel": true,
"self": 5352.722329935117
},
"steps_from_proto": {
"total": 433.4787372761639,
"count": 3007047,
"is_parallel": true,
"self": 200.26946144725662,
"children": {
"_process_rank_one_or_two_observation": {
"total": 233.20927582890727,
"count": 6014094,
"is_parallel": true,
"self": 233.20927582890727
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1838.7002657080302,
"count": 3007047,
"self": 52.23635603673756,
"children": {
"process_trajectory": {
"total": 292.4972032713704,
"count": 3007047,
"self": 290.8138325713808,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6833706999896094,
"count": 6,
"self": 1.6833706999896094
}
}
},
"_update_policy": {
"total": 1493.9667063999223,
"count": 14,
"self": 1025.7034898959682,
"children": {
"TorchPPOOptimizer.update": {
"total": 468.263216503954,
"count": 14000,
"self": 468.263216503954
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.200009137392044e-06,
"count": 1,
"self": 1.200009137392044e-06
},
"TrainerController._save_models": {
"total": 0.2166599000338465,
"count": 1,
"self": 0.01178780006011948,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20487209997372702,
"count": 1,
"self": 0.20487209997372702
}
}
}
}
}
}
}