{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.19130906462669373,
"min": 0.17553268373012543,
"max": 1.483086109161377,
"count": 86
},
"Pyramids.Policy.Entropy.sum": {
"value": 5659.6875,
"min": 5240.70361328125,
"max": 44990.8984375,
"count": 86
},
"Pyramids.Step.mean": {
"value": 2579996.0,
"min": 29952.0,
"max": 2579996.0,
"count": 86
},
"Pyramids.Step.sum": {
"value": 2579996.0,
"min": 29952.0,
"max": 2579996.0,
"count": 86
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8240099549293518,
"min": -0.10620949417352676,
"max": 0.886141300201416,
"count": 86
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 249.67501831054688,
"min": -25.59648895263672,
"max": 274.70379638671875,
"count": 86
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0140521926805377,
"min": -0.010541483759880066,
"max": 0.3994080126285553,
"count": 86
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.257814407348633,
"min": -2.846200704574585,
"max": 94.65969848632812,
"count": 86
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06674559654786835,
"min": 0.06373497846791344,
"max": 0.07371800786256986,
"count": 86
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9344383516701569,
"min": 0.505506548982956,
"max": 1.0904909626891215,
"count": 86
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01545798972877413,
"min": 0.0003068518053494721,
"max": 0.0170064264398423,
"count": 86
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21641185620283782,
"min": 0.003989073469543137,
"max": 0.2436941318252745,
"count": 86
},
"Pyramids.Policy.LearningRate.mean": {
"value": 4.356361405025239e-05,
"min": 4.356361405025239e-05,
"max": 0.00029838354339596195,
"count": 86
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006098905967035335,
"min": 0.0006098905967035335,
"max": 0.003800859533046867,
"count": 86
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1145211761904762,
"min": 0.1145211761904762,
"max": 0.19946118095238097,
"count": 86
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.603296466666667,
"min": 1.3962282666666668,
"max": 2.7524795999999996,
"count": 86
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0014606655014285717,
"min": 0.0014606655014285717,
"max": 0.009946171977142856,
"count": 86
},
"Pyramids.Policy.Beta.sum": {
"value": 0.020449317020000003,
"min": 0.020449317020000003,
"max": 0.12670861802,
"count": 86
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007880064658820629,
"min": 0.007141666021198034,
"max": 0.3941015303134918,
"count": 86
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11032091081142426,
"min": 0.0999833270907402,
"max": 2.7587106227874756,
"count": 86
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 207.88148148148147,
"min": 207.88148148148147,
"max": 999.0,
"count": 86
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28064.0,
"min": 15984.0,
"max": 32481.0,
"count": 86
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7921193945986122,
"min": -1.0000000521540642,
"max": 1.7921193945986122,
"count": 86
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 240.14399887621403,
"min": -31.995601639151573,
"max": 263.2419985830784,
"count": 86
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7921193945986122,
"min": -1.0000000521540642,
"max": 1.7921193945986122,
"count": 86
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 240.14399887621403,
"min": -31.995601639151573,
"max": 263.2419985830784,
"count": 86
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.017095780673822555,
"min": 0.017095780673822555,
"max": 7.385909885168076,
"count": 86
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2908346102922224,
"min": 2.078473886853317,
"max": 118.17455816268921,
"count": 86
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 86
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 86
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657744731",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657750711"
},
"total": 5980.278680743,
"count": 1,
"self": 0.42508442800135526,
"children": {
"run_training.setup": {
"total": 0.0426390210000136,
"count": 1,
"self": 0.0426390210000136
},
"TrainerController.start_learning": {
"total": 5979.810957293999,
"count": 1,
"self": 4.7630246399294265,
"children": {
"TrainerController._reset_env": {
"total": 10.02668748200017,
"count": 1,
"self": 10.02668748200017
},
"TrainerController.advance": {
"total": 5964.86891988607,
"count": 168734,
"self": 4.845684374235134,
"children": {
"env_step": {
"total": 4081.294303208959,
"count": 168734,
"self": 3769.118829394134,
"children": {
"SubprocessEnvManager._take_step": {
"total": 309.64846007101346,
"count": 168734,
"self": 13.331629374792556,
"children": {
"TorchPolicy.evaluate": {
"total": 296.3168306962209,
"count": 162701,
"self": 103.58203225335706,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.73479844286385,
"count": 162701,
"self": 192.73479844286385
}
}
}
}
},
"workers": {
"total": 2.5270137438114944,
"count": 168733,
"self": 0.0,
"children": {
"worker_root": {
"total": 5967.984283284089,
"count": 168733,
"is_parallel": true,
"self": 2502.1299474501184,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007898622000084288,
"count": 1,
"is_parallel": true,
"self": 0.006707159000143292,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011914629999409954,
"count": 8,
"is_parallel": true,
"self": 0.0011914629999409954
}
}
},
"UnityEnvironment.step": {
"total": 0.04561361400010355,
"count": 1,
"is_parallel": true,
"self": 0.0005194249997657607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004514690001542476,
"count": 1,
"is_parallel": true,
"self": 0.0004514690001542476
},
"communicator.exchange": {
"total": 0.043020894000164844,
"count": 1,
"is_parallel": true,
"self": 0.043020894000164844
},
"steps_from_proto": {
"total": 0.0016218260000187001,
"count": 1,
"is_parallel": true,
"self": 0.00041651299989098334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012053130001277168,
"count": 8,
"is_parallel": true,
"self": 0.0012053130001277168
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3465.8543358339703,
"count": 168732,
"is_parallel": true,
"self": 79.32340474307784,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 62.74795037784088,
"count": 168732,
"is_parallel": true,
"self": 62.74795037784088
},
"communicator.exchange": {
"total": 3068.720157017121,
"count": 168732,
"is_parallel": true,
"self": 3068.720157017121
},
"steps_from_proto": {
"total": 255.06282369593055,
"count": 168732,
"is_parallel": true,
"self": 66.5604769629465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 188.50234673298405,
"count": 1349856,
"is_parallel": true,
"self": 188.50234673298405
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1878.7289323028767,
"count": 168733,
"self": 8.850312633996055,
"children": {
"process_trajectory": {
"total": 442.49058333387006,
"count": 168733,
"self": 441.85932786387025,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6312554699998145,
"count": 5,
"self": 0.6312554699998145
}
}
},
"_update_policy": {
"total": 1427.3880363350106,
"count": 1204,
"self": 563.4498628769941,
"children": {
"TorchPPOOptimizer.update": {
"total": 863.9381734580165,
"count": 59325,
"self": 863.9381734580165
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5690002328483388e-06,
"count": 1,
"self": 1.5690002328483388e-06
},
"TrainerController._save_models": {
"total": 0.15232371699949,
"count": 1,
"self": 0.0022032189999663387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15012049799952365,
"count": 1,
"self": 0.15012049799952365
}
}
}
}
}
}
}