ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4081023931503296,
"min": 1.4081023931503296,
"max": 1.4295766353607178,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68806.921875,
"min": 67935.4765625,
"max": 76108.609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.42210144927536,
"min": 84.82847341337907,
"max": 423.10169491525426,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49361.0,
"min": 48792.0,
"max": 50075.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49576.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49576.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.47186541557312,
"min": 0.022856665775179863,
"max": 2.47186541557312,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1364.4697265625,
"min": 2.674229860305786,
"max": 1437.2381591796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9078482077389523,
"min": 1.8477834901239119,
"max": 3.910693529744091,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2157.1322106719017,
"min": 216.19066834449768,
"max": 2279.934327840805,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9078482077389523,
"min": 1.8477834901239119,
"max": 3.910693529744091,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2157.1322106719017,
"min": 216.19066834449768,
"max": 2279.934327840805,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01612020636118056,
"min": 0.012478595116651073,
"max": 0.019822738979807276,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04836061908354168,
"min": 0.024957190233302146,
"max": 0.05941543560281086,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054990309642420875,
"min": 0.023376947641372683,
"max": 0.06268126418193182,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16497092892726262,
"min": 0.04777081441134215,
"max": 0.18525689865152042,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2209489263833317e-06,
"min": 3.2209489263833317e-06,
"max": 0.00029529277656907495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.662846779149995e-06,
"min": 9.662846779149995e-06,
"max": 0.0008439048186984,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107361666666664,
"min": 0.10107361666666664,
"max": 0.19843092500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30322084999999993,
"min": 0.20728420000000003,
"max": 0.5813016,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.357347166666662e-05,
"min": 6.357347166666662e-05,
"max": 0.0049217031574999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019072041499999985,
"min": 0.00019072041499999985,
"max": 0.014066949840000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671110216",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671112495"
},
"total": 2279.016256989,
"count": 1,
"self": 0.4033983019999141,
"children": {
"run_training.setup": {
"total": 0.11385295900004166,
"count": 1,
"self": 0.11385295900004166
},
"TrainerController.start_learning": {
"total": 2278.499005728,
"count": 1,
"self": 3.949804873031553,
"children": {
"TrainerController._reset_env": {
"total": 10.634288450999975,
"count": 1,
"self": 10.634288450999975
},
"TrainerController.advance": {
"total": 2263.796705685969,
"count": 231619,
"self": 4.211165727047955,
"children": {
"env_step": {
"total": 1790.8854276029115,
"count": 231619,
"self": 1504.0238351479015,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.162320315922,
"count": 231619,
"self": 14.702313165941177,
"children": {
"TorchPolicy.evaluate": {
"total": 269.4600071499808,
"count": 222782,
"self": 67.71440126382322,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.7456058861576,
"count": 222782,
"self": 201.7456058861576
}
}
}
}
},
"workers": {
"total": 2.6992721390879524,
"count": 231619,
"self": 0.0,
"children": {
"worker_root": {
"total": 2270.318079295895,
"count": 231619,
"is_parallel": true,
"self": 1036.153935024131,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033584489999611833,
"count": 1,
"is_parallel": true,
"self": 0.0003554580000582064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003002990999902977,
"count": 2,
"is_parallel": true,
"self": 0.003002990999902977
}
}
},
"UnityEnvironment.step": {
"total": 0.027842350000128135,
"count": 1,
"is_parallel": true,
"self": 0.0002678859998468397,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002539360000355373,
"count": 1,
"is_parallel": true,
"self": 0.0002539360000355373
},
"communicator.exchange": {
"total": 0.026560741000139387,
"count": 1,
"is_parallel": true,
"self": 0.026560741000139387
},
"steps_from_proto": {
"total": 0.0007597870001063711,
"count": 1,
"is_parallel": true,
"self": 0.00025260900019929977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005071779999070714,
"count": 2,
"is_parallel": true,
"self": 0.0005071779999070714
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1234.1641442717641,
"count": 231618,
"is_parallel": true,
"self": 35.66152052180837,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.71091040196507,
"count": 231618,
"is_parallel": true,
"self": 80.71091040196507
},
"communicator.exchange": {
"total": 1021.2048244250909,
"count": 231618,
"is_parallel": true,
"self": 1021.2048244250909
},
"steps_from_proto": {
"total": 96.58688892289979,
"count": 231618,
"is_parallel": true,
"self": 41.98372585593711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.60316306696268,
"count": 463236,
"is_parallel": true,
"self": 54.60316306696268
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.7001123560094,
"count": 231619,
"self": 6.382535362946783,
"children": {
"process_trajectory": {
"total": 150.76977607606204,
"count": 231619,
"self": 149.57705099506234,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1927250809997076,
"count": 10,
"self": 1.1927250809997076
}
}
},
"_update_policy": {
"total": 311.5478009170006,
"count": 97,
"self": 258.0676484870098,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.48015242999077,
"count": 2910,
"self": 53.48015242999077
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5279997569450643e-06,
"count": 1,
"self": 1.5279997569450643e-06
},
"TrainerController._save_models": {
"total": 0.11820519000002605,
"count": 1,
"self": 0.001984226000331546,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1162209639996945,
"count": 1,
"self": 0.1162209639996945
}
}
}
}
}
}
}
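
Not part of timers.json itself: a minimal sketch (assuming the file above is saved locally as run_logs/timers.json, a hypothetical path) of how the gauge summaries could be read back with Python's standard json module for a quick look at the run.

import json

# Assumed local path; adjust to wherever this timers.json was downloaded.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus the min/max across the recorded
# summary writes ("count", 40 in this run).
for name, stats in timers["gauges"].items():
    print(f'{name}: value={stats["value"]:.4g} '
          f'min={stats["min"]:.4g} max={stats["max"]:.4g} '
          f'(count={stats["count"]})')

# The remaining top-level keys ("total", "self", "children", ...) form the
# hierarchical timer tree written by ML-Agents' timing instrumentation.
print("total wall-clock seconds:", timers["total"])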