{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4052417278289795,
"min": 1.4052417278289795,
"max": 1.427058458328247,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70569.8359375,
"min": 68782.984375,
"max": 78611.3828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.35,
"min": 90.99264705882354,
"max": 393.81889763779526,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49329.0,
"min": 48863.0,
"max": 50162.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49622.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49622.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3746049404144287,
"min": 0.13755851984024048,
"max": 2.4172074794769287,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1282.28662109375,
"min": 17.332372665405273,
"max": 1313.2740478515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8172720580189314,
"min": 1.8590096993342278,
"max": 3.902711288079502,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2061.326911330223,
"min": 234.2352221161127,
"max": 2061.326911330223,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8172720580189314,
"min": 1.8590096993342278,
"max": 3.902711288079502,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2061.326911330223,
"min": 234.2352221161127,
"max": 2061.326911330223,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01759843173713307,
"min": 0.013571080629481003,
"max": 0.020409384913849256,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05279529521139921,
"min": 0.027142161258962006,
"max": 0.05840009799770389,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05555900952054394,
"min": 0.020863294477264087,
"max": 0.06307229076822599,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16667702856163183,
"min": 0.041726588954528174,
"max": 0.18921687230467796,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.250798916433339e-06,
"min": 3.250798916433339e-06,
"max": 0.000295255426581525,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.752396749300016e-06,
"min": 9.752396749300016e-06,
"max": 0.0008438901187033,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108356666666667,
"min": 0.10108356666666667,
"max": 0.19841847500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032507,
"min": 0.20729705000000004,
"max": 0.5812967,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.406997666666677e-05,
"min": 6.406997666666677e-05,
"max": 0.004921081902499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001922099300000003,
"min": 0.0001922099300000003,
"max": 0.01406670533,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674303439",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674305628"
},
"total": 2189.220335198,
"count": 1,
"self": 0.3874520410004152,
"children": {
"run_training.setup": {
"total": 0.10282269599997562,
"count": 1,
"self": 0.10282269599997562
},
"TrainerController.start_learning": {
"total": 2188.730060461,
"count": 1,
"self": 3.663607447962022,
"children": {
"TrainerController._reset_env": {
"total": 11.188114812000038,
"count": 1,
"self": 11.188114812000038
},
"TrainerController.advance": {
"total": 2173.7630936610376,
"count": 230946,
"self": 3.955051096103489,
"children": {
"env_step": {
"total": 1717.7072793749403,
"count": 230946,
"self": 1444.5743032408134,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.6271653410403,
"count": 230946,
"self": 14.440131751071249,
"children": {
"TorchPolicy.evaluate": {
"total": 256.1870335899691,
"count": 222928,
"self": 65.12145814699676,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.06557544297232,
"count": 222928,
"self": 191.06557544297232
}
}
}
}
},
"workers": {
"total": 2.5058107930866527,
"count": 230946,
"self": 0.0,
"children": {
"worker_root": {
"total": 2178.982895913973,
"count": 230946,
"is_parallel": true,
"self": 989.471648026944,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022178739999958452,
"count": 1,
"is_parallel": true,
"self": 0.0003806939999435599,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018371800000522853,
"count": 2,
"is_parallel": true,
"self": 0.0018371800000522853
}
}
},
"UnityEnvironment.step": {
"total": 0.027761998000016774,
"count": 1,
"is_parallel": true,
"self": 0.00029503299998623334,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018542600003002008,
"count": 1,
"is_parallel": true,
"self": 0.00018542600003002008
},
"communicator.exchange": {
"total": 0.02655463499996813,
"count": 1,
"is_parallel": true,
"self": 0.02655463499996813
},
"steps_from_proto": {
"total": 0.0007269040000323912,
"count": 1,
"is_parallel": true,
"self": 0.00025375000006988557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004731539999625056,
"count": 2,
"is_parallel": true,
"self": 0.0004731539999625056
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.5112478870292,
"count": 230945,
"is_parallel": true,
"self": 34.371009500922355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.19930188505958,
"count": 230945,
"is_parallel": true,
"self": 76.19930188505958
},
"communicator.exchange": {
"total": 986.2565882130345,
"count": 230945,
"is_parallel": true,
"self": 986.2565882130345
},
"steps_from_proto": {
"total": 92.68434828801259,
"count": 230945,
"is_parallel": true,
"self": 38.06530313611006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.619045151902526,
"count": 461890,
"is_parallel": true,
"self": 54.619045151902526
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 452.1007631899939,
"count": 230946,
"self": 6.2038485659984985,
"children": {
"process_trajectory": {
"total": 139.07761023999416,
"count": 230946,
"self": 137.98164804799376,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0959621920003997,
"count": 10,
"self": 1.0959621920003997
}
}
},
"_update_policy": {
"total": 306.81930438400127,
"count": 97,
"self": 253.85989697900277,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.9594074049985,
"count": 2910,
"self": 52.9594074049985
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1580000318645034e-06,
"count": 1,
"self": 1.1580000318645034e-06
},
"TrainerController._save_models": {
"total": 0.11524338200024431,
"count": 1,
"self": 0.0021473670003615553,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11309601499988275,
"count": 1,
"self": 0.11309601499988275
}
}
}
}
}
}
}