ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5634326338768005,
"min": 0.5634326338768005,
"max": 1.4527751207351685,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16984.11328125,
"min": 16984.11328125,
"max": 44071.38671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5152310729026794,
"min": -0.09772597253322601,
"max": 0.5177627801895142,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 144.77993774414062,
"min": -23.64968490600586,
"max": 144.77993774414062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06936292350292206,
"min": -0.8142365217208862,
"max": 0.4091348946094513,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 19.490982055664062,
"min": -209.2587890625,
"max": 104.62631225585938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06872159964922402,
"min": 0.06591818477664632,
"max": 0.07384415903705609,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0308239947383602,
"min": 0.5029376523061415,
"max": 1.0509524585841243,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01536578086670488,
"min": 0.0004509041641179754,
"max": 0.1384973780175652,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23048671300057322,
"min": 0.00586175413353368,
"max": 1.9389632922459128,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.46375751211333e-06,
"min": 7.46375751211333e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011195636268169995,
"min": 0.00011195636268169995,
"max": 0.0037597822467392997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248788666666667,
"min": 0.10248788666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373183,
"min": 1.3886848,
"max": 2.6532607,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002585398779999999,
"min": 0.0002585398779999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038780981699999984,
"min": 0.0038780981699999984,
"max": 0.12534074392999997,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00925430003553629,
"min": 0.00925430003553629,
"max": 0.5296210050582886,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13881449401378632,
"min": 0.13351547718048096,
"max": 3.7073471546173096,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 374.2317073170732,
"min": 374.2317073170732,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30687.0,
"min": 15984.0,
"max": 33638.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6013707078811599,
"min": -1.0000000521540642,
"max": 1.6013707078811599,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 131.3123980462551,
"min": -28.67000152915716,
"max": 131.3123980462551,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6013707078811599,
"min": -1.0000000521540642,
"max": 1.6013707078811599,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 131.3123980462551,
"min": -28.67000152915716,
"max": 131.3123980462551,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.036110049190295,
"min": 0.036110049190295,
"max": 11.623853388242424,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9610240336041898,
"min": 2.9545468793658074,
"max": 185.98165421187878,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692760616",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692762841"
},
"total": 2224.78985865,
"count": 1,
"self": 0.890931172999899,
"children": {
"run_training.setup": {
"total": 0.04770891699990898,
"count": 1,
"self": 0.04770891699990898
},
"TrainerController.start_learning": {
"total": 2223.8512185600002,
"count": 1,
"self": 1.4315681839520948,
"children": {
"TrainerController._reset_env": {
"total": 4.086086020000039,
"count": 1,
"self": 4.086086020000039
},
"TrainerController.advance": {
"total": 2218.1829378570483,
"count": 63660,
"self": 1.3898323580297074,
"children": {
"env_step": {
"total": 1559.351747215012,
"count": 63660,
"self": 1450.0890955838595,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.42798975107394,
"count": 63660,
"self": 4.7262296980195515,
"children": {
"TorchPolicy.evaluate": {
"total": 103.70176005305439,
"count": 62551,
"self": 103.70176005305439
}
}
},
"workers": {
"total": 0.8346618800785564,
"count": 63660,
"self": 0.0,
"children": {
"worker_root": {
"total": 2218.366855830046,
"count": 63660,
"is_parallel": true,
"self": 884.0183554610855,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018233350001537474,
"count": 1,
"is_parallel": true,
"self": 0.0005205200002365018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013028149999172456,
"count": 8,
"is_parallel": true,
"self": 0.0013028149999172456
}
}
},
"UnityEnvironment.step": {
"total": 0.04687446599996292,
"count": 1,
"is_parallel": true,
"self": 0.0005671059998348937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047578300018358277,
"count": 1,
"is_parallel": true,
"self": 0.00047578300018358277
},
"communicator.exchange": {
"total": 0.04396212699998614,
"count": 1,
"is_parallel": true,
"self": 0.04396212699998614
},
"steps_from_proto": {
"total": 0.0018694499999583059,
"count": 1,
"is_parallel": true,
"self": 0.0003890080001838214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014804419997744844,
"count": 8,
"is_parallel": true,
"self": 0.0014804419997744844
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1334.3485003689605,
"count": 63659,
"is_parallel": true,
"self": 33.94861545599929,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.83485084594645,
"count": 63659,
"is_parallel": true,
"self": 22.83485084594645
},
"communicator.exchange": {
"total": 1171.1353511679763,
"count": 63659,
"is_parallel": true,
"self": 1171.1353511679763
},
"steps_from_proto": {
"total": 106.42968289903843,
"count": 63659,
"is_parallel": true,
"self": 20.934616158927383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.49506674011104,
"count": 509272,
"is_parallel": true,
"self": 85.49506674011104
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 657.4413582840066,
"count": 63660,
"self": 2.664633511987404,
"children": {
"process_trajectory": {
"total": 109.48544287401955,
"count": 63660,
"self": 109.16208968801993,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32335318599962193,
"count": 2,
"self": 0.32335318599962193
}
}
},
"_update_policy": {
"total": 545.2912818979996,
"count": 455,
"self": 357.7641486479997,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.5271332499999,
"count": 22788,
"self": 187.5271332499999
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3929998203821015e-06,
"count": 1,
"self": 1.3929998203821015e-06
},
"TrainerController._save_models": {
"total": 0.15062510600000678,
"count": 1,
"self": 0.0026597900000524533,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14796531599995433,
"count": 1,
"self": 0.14796531599995433
}
}
}
}
}
}
}