{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.22811010479927063,
"min": 0.2168775498867035,
"max": 1.4767680168151855,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 6850.6025390625,
"min": 6506.32666015625,
"max": 44799.234375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499954.0,
"min": 29952.0,
"max": 1499954.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499954.0,
"min": 29952.0,
"max": 1499954.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7591997385025024,
"min": -0.09024693071842194,
"max": 0.8455371260643005,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 223.96392822265625,
"min": -21.749509811401367,
"max": 255.35220336914062,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.008865242823958397,
"min": -0.02873266115784645,
"max": 0.3080531358718872,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.615246534347534,
"min": -8.648530960083008,
"max": 74.54885864257812,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06793310851759575,
"min": 0.06452820453096113,
"max": 0.07404865741028473,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0189966277639362,
"min": 0.5163264672379625,
"max": 1.0879468544929598,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0154858152212779,
"min": 0.0010252828612129648,
"max": 0.01679059245465656,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2322872283191685,
"min": 0.011278111473342614,
"max": 0.24845581312889836,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.0318189894266692e-06,
"min": 3.0318189894266692e-06,
"max": 0.00029676708679192377,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.547728484140004e-05,
"min": 4.547728484140004e-05,
"max": 0.0037379627540124662,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10101057333333334,
"min": 0.10101057333333334,
"max": 0.19892236190476195,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5151586000000001,
"min": 1.3924565333333336,
"max": 2.6851005999999993,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011095627600000009,
"min": 0.00011095627600000009,
"max": 0.009892343954285714,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0016643441400000013,
"min": 0.0016643441400000013,
"max": 0.12461415458,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009609147906303406,
"min": 0.00955205224454403,
"max": 0.4677009880542755,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1441372185945511,
"min": 0.1337287276983261,
"max": 3.273906946182251,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.18260869565216,
"min": 214.65185185185186,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28656.0,
"min": 15984.0,
"max": 33117.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7508173800033071,
"min": -1.0000000521540642,
"max": 1.7695073414703502,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 201.34399870038033,
"min": -30.431401625275612,
"max": 240.65299843996763,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7508173800033071,
"min": -1.0000000521540642,
"max": 1.7695073414703502,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 201.34399870038033,
"min": -30.431401625275612,
"max": 240.65299843996763,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.024860629309272474,
"min": 0.021765826649618035,
"max": 9.167079098522663,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8589723705663346,
"min": 2.8589723705663346,
"max": 146.6732655763626,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675156895",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675160244"
},
"total": 3348.9445013249997,
"count": 1,
"self": 0.6063211089995093,
"children": {
"run_training.setup": {
"total": 0.10440959299990027,
"count": 1,
"self": 0.10440959299990027
},
"TrainerController.start_learning": {
"total": 3348.233770623,
"count": 1,
"self": 1.8131566389415639,
"children": {
"TrainerController._reset_env": {
"total": 9.648885517999929,
"count": 1,
"self": 9.648885517999929
},
"TrainerController.advance": {
"total": 3336.6922771120585,
"count": 96863,
"self": 1.823896124987641,
"children": {
"env_step": {
"total": 2354.7771621200172,
"count": 96863,
"self": 2202.6545989829756,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.0003262889901,
"count": 96863,
"self": 6.311212082003635,
"children": {
"TorchPolicy.evaluate": {
"total": 144.68911420698646,
"count": 93803,
"self": 49.05537672796231,
"children": {
"TorchPolicy.sample_actions": {
"total": 95.63373747902415,
"count": 93803,
"self": 95.63373747902415
}
}
}
}
},
"workers": {
"total": 1.1222368480516707,
"count": 96863,
"self": 0.0,
"children": {
"worker_root": {
"total": 3342.8342841250465,
"count": 96863,
"is_parallel": true,
"self": 1284.0578550560417,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0058348789999627115,
"count": 1,
"is_parallel": true,
"self": 0.0035042839999732678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023305949999894437,
"count": 8,
"is_parallel": true,
"self": 0.0023305949999894437
}
}
},
"UnityEnvironment.step": {
"total": 0.05924704599999586,
"count": 1,
"is_parallel": true,
"self": 0.0005264979999992647,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047404199995071394,
"count": 1,
"is_parallel": true,
"self": 0.00047404199995071394
},
"communicator.exchange": {
"total": 0.056667810000021746,
"count": 1,
"is_parallel": true,
"self": 0.056667810000021746
},
"steps_from_proto": {
"total": 0.001578696000024138,
"count": 1,
"is_parallel": true,
"self": 0.0004283009999426213,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011503950000815166,
"count": 8,
"is_parallel": true,
"self": 0.0011503950000815166
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2058.7764290690047,
"count": 96862,
"is_parallel": true,
"self": 41.92970633915911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 33.25340688794404,
"count": 96862,
"is_parallel": true,
"self": 33.25340688794404
},
"communicator.exchange": {
"total": 1846.8967126109562,
"count": 96862,
"is_parallel": true,
"self": 1846.8967126109562
},
"steps_from_proto": {
"total": 136.69660323094524,
"count": 96862,
"is_parallel": true,
"self": 31.953351178821663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.74325205212358,
"count": 774896,
"is_parallel": true,
"self": 104.74325205212358
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 980.0912188670536,
"count": 96863,
"self": 3.617603487034785,
"children": {
"process_trajectory": {
"total": 215.61931750301562,
"count": 96863,
"self": 215.35682968901563,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26248781399999643,
"count": 3,
"self": 0.26248781399999643
}
}
},
"_update_policy": {
"total": 760.8542978770032,
"count": 691,
"self": 286.62884187999805,
"children": {
"TorchPPOOptimizer.update": {
"total": 474.22545599700516,
"count": 34236,
"self": 474.22545599700516
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.749998414714355e-07,
"count": 1,
"self": 8.749998414714355e-07
},
"TrainerController._save_models": {
"total": 0.07945047900011559,
"count": 1,
"self": 0.001355904999854829,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07809457400026076,
"count": 1,
"self": 0.07809457400026076
}
}
}
}
}
}
}