ppo-PyramidsRND / run_logs /timers.json
AMI0x's picture
3M Step Training Agent
667c499
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.19219397008419037,
"min": 0.18550704419612885,
"max": 0.49777692556381226,
"count": 67
},
"Pyramids.Policy.Entropy.sum": {
"value": 5624.3642578125,
"min": 5342.916015625,
"max": 14828.44921875,
"count": 67
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 226.48507462686567,
"min": 208.86013986013987,
"max": 533.1666666666666,
"count": 67
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30349.0,
"min": 17114.0,
"max": 31990.0,
"count": 67
},
"Pyramids.Step.mean": {
"value": 2999938.0,
"min": 1019997.0,
"max": 2999938.0,
"count": 67
},
"Pyramids.Step.sum": {
"value": 2999938.0,
"min": 1019997.0,
"max": 2999938.0,
"count": 67
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6666208505630493,
"min": 0.36387720704078674,
"max": 0.7674804925918579,
"count": 67
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 93.32691955566406,
"min": 21.42395782470703,
"max": 109.74971008300781,
"count": 67
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0011070820037275553,
"min": -0.0011070820037275553,
"max": 0.06387358158826828,
"count": 67
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.15499147772789001,
"min": -0.15499147772789001,
"max": 6.659514904022217,
"count": 67
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.774075158108446,
"min": 1.0004732936620713,
"max": 1.7912237502478219,
"count": 67
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 235.9519960284233,
"min": 59.13059860467911,
"max": 256.14499628543854,
"count": 67
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.774075158108446,
"min": 1.0004732936620713,
"max": 1.7912237502478219,
"count": 67
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 235.9519960284233,
"min": 59.13059860467911,
"max": 256.14499628543854,
"count": 67
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013118862431298126,
"min": 0.01211571304853745,
"max": 0.05650609043659642,
"count": 67
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7448087033626507,
"min": 1.703937309619505,
"max": 3.3903654261957854,
"count": 67
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07021938906689147,
"min": 0.06433822517548483,
"max": 0.07532237745243354,
"count": 67
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9830714469364806,
"min": 0.5112304518232115,
"max": 1.038050119580662,
"count": 67
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01699921610653327,
"min": 0.012220533279286051,
"max": 0.019389853853861164,
"count": 67
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23798902549146578,
"min": 0.08875731165296516,
"max": 0.2533198153621148,
"count": 67
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5522851968904773e-06,
"min": 1.5522851968904773e-06,
"max": 0.00019890410512721427,
"count": 67
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1731992756466683e-05,
"min": 2.1731992756466683e-05,
"max": 0.0027090938969688665,
"count": 67
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051739523809525,
"min": 0.10051739523809525,
"max": 0.16630135714285715,
"count": 67
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4072435333333335,
"min": 1.1641095000000001,
"max": 2.3030311333333335,
"count": 67
},
"Pyramids.Policy.Beta.mean": {
"value": 6.168778428571434e-05,
"min": 6.168778428571434e-05,
"max": 0.006633505578571429,
"count": 67
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008636289800000007,
"min": 0.0008636289800000007,
"max": 0.09035281022,
"count": 67
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005565266590565443,
"min": 0.005470035132020712,
"max": 0.011300760321319103,
"count": 67
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07791373133659363,
"min": 0.07111045718193054,
"max": 0.13362309336662292,
"count": 67
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682361679",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682366838"
},
"total": 5158.253907008,
"count": 1,
"self": 0.5404954800005726,
"children": {
"run_training.setup": {
"total": 0.10461595600008877,
"count": 1,
"self": 0.10461595600008877
},
"TrainerController.start_learning": {
"total": 5157.6087955719995,
"count": 1,
"self": 3.286220517969923,
"children": {
"TrainerController._reset_env": {
"total": 3.6928145850001783,
"count": 1,
"self": 3.6928145850001783
},
"TrainerController.advance": {
"total": 5150.533193084029,
"count": 130712,
"self": 3.4350088819392113,
"children": {
"env_step": {
"total": 3906.725981640026,
"count": 130712,
"self": 3663.215075721788,
"children": {
"SubprocessEnvManager._take_step": {
"total": 241.43405281694277,
"count": 130712,
"self": 10.486002190841191,
"children": {
"TorchPolicy.evaluate": {
"total": 230.94805062610158,
"count": 125128,
"self": 230.94805062610158
}
}
},
"workers": {
"total": 2.0768531012954554,
"count": 130712,
"self": 0.0,
"children": {
"worker_root": {
"total": 5145.163647358251,
"count": 130712,
"is_parallel": true,
"self": 1741.3275867404564,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001905523000004905,
"count": 1,
"is_parallel": true,
"self": 0.0006361430005199509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012693799994849542,
"count": 8,
"is_parallel": true,
"self": 0.0012693799994849542
}
}
},
"UnityEnvironment.step": {
"total": 0.04917733299998872,
"count": 1,
"is_parallel": true,
"self": 0.0005287529993438511,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004954010000801645,
"count": 1,
"is_parallel": true,
"self": 0.0004954010000801645
},
"communicator.exchange": {
"total": 0.04648904800023956,
"count": 1,
"is_parallel": true,
"self": 0.04648904800023956
},
"steps_from_proto": {
"total": 0.001664131000325142,
"count": 1,
"is_parallel": true,
"self": 0.0003975099998569931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012666210004681488,
"count": 8,
"is_parallel": true,
"self": 0.0012666210004681488
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3403.836060617795,
"count": 130711,
"is_parallel": true,
"self": 67.67384928773436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 50.31187488609794,
"count": 130711,
"is_parallel": true,
"self": 50.31187488609794
},
"communicator.exchange": {
"total": 3076.8317580709095,
"count": 130711,
"is_parallel": true,
"self": 3076.8317580709095
},
"steps_from_proto": {
"total": 209.01857837305306,
"count": 130711,
"is_parallel": true,
"self": 47.045237717555665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 161.9733406554974,
"count": 1045688,
"is_parallel": true,
"self": 161.9733406554974
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1240.3722025620646,
"count": 130712,
"self": 7.083264434037119,
"children": {
"process_trajectory": {
"total": 196.06929223203406,
"count": 130712,
"self": 195.45935048403317,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6099417480008924,
"count": 4,
"self": 0.6099417480008924
}
}
},
"_update_policy": {
"total": 1037.2196458959934,
"count": 901,
"self": 661.1039385619292,
"children": {
"TorchPPOOptimizer.update": {
"total": 376.11570733406415,
"count": 45618,
"self": 376.11570733406415
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1880001693498343e-06,
"count": 1,
"self": 1.1880001693498343e-06
},
"TrainerController._save_models": {
"total": 0.09656619700035662,
"count": 1,
"self": 0.0018388770004094113,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09472731999994721,
"count": 1,
"self": 0.09472731999994721
}
}
}
}
}
}
}