{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39594748616218567,
"min": 0.3801190257072449,
"max": 1.4076942205429077,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12024.1328125,
"min": 11367.0791015625,
"max": 42703.8125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989884.0,
"min": 29952.0,
"max": 989884.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989884.0,
"min": 29952.0,
"max": 989884.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5139580368995667,
"min": -0.13790540397167206,
"max": 0.5511236786842346,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.7686767578125,
"min": -33.23520278930664,
"max": 155.4168701171875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0022845836356282234,
"min": -0.022786881774663925,
"max": 0.5025518536567688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.6168375611305237,
"min": -6.0157365798950195,
"max": 119.10478973388672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06716546974106072,
"min": 0.06526638166307672,
"max": 0.07293945225796773,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.94031657637485,
"min": 0.5105761658057741,
"max": 1.0344177707892788,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012353459408803334,
"min": 0.000680715049517911,
"max": 0.015525419622939633,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17294843172324667,
"min": 0.009530010693250754,
"max": 0.21735587472115486,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.648126022085718e-06,
"min": 7.648126022085718e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010707376430920006,
"min": 0.00010707376430920006,
"max": 0.003507455630848199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254934285714287,
"min": 0.10254934285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356908000000002,
"min": 1.3886848,
"max": 2.5691518,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026467935142857155,
"min": 0.00026467935142857155,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037055109200000015,
"min": 0.0037055109200000015,
"max": 0.11693826482000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014019317924976349,
"min": 0.014019317924976349,
"max": 0.6390002369880676,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19627045094966888,
"min": 0.19627045094966888,
"max": 4.473001480102539,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 374.7162162162162,
"min": 344.3636363636364,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27729.0,
"min": 15984.0,
"max": 33452.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4090053878120474,
"min": -1.0000000521540642,
"max": 1.6165774770081043,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.2663986980915,
"min": -29.628401644527912,
"max": 143.26279834657907,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4090053878120474,
"min": -1.0000000521540642,
"max": 1.6165774770081043,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.2663986980915,
"min": -29.628401644527912,
"max": 143.26279834657907,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.054473458735800874,
"min": 0.05134430645148824,
"max": 12.499833401292562,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.031035946449265,
"min": 4.031035946449265,
"max": 199.997334420681,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685040178",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685042334"
},
"total": 2156.50135813,
"count": 1,
"self": 0.8174747020002542,
"children": {
"run_training.setup": {
"total": 0.03628847799996038,
"count": 1,
"self": 0.03628847799996038
},
"TrainerController.start_learning": {
"total": 2155.64759495,
"count": 1,
"self": 1.375283336022676,
"children": {
"TrainerController._reset_env": {
"total": 3.6871634710000762,
"count": 1,
"self": 3.6871634710000762
},
"TrainerController.advance": {
"total": 2150.4870169299766,
"count": 63735,
"self": 1.364928163003242,
"children": {
"env_step": {
"total": 1519.2527713319976,
"count": 63735,
"self": 1407.747336174077,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.67511432693686,
"count": 63735,
"self": 4.825785894973478,
"children": {
"TorchPolicy.evaluate": {
"total": 105.84932843196339,
"count": 62547,
"self": 105.84932843196339
}
}
},
"workers": {
"total": 0.8303208309837373,
"count": 63735,
"self": 0.0,
"children": {
"worker_root": {
"total": 2150.6074437929346,
"count": 63735,
"is_parallel": true,
"self": 856.9305646479306,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017258200000469515,
"count": 1,
"is_parallel": true,
"self": 0.000567506000152207,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011583139998947445,
"count": 8,
"is_parallel": true,
"self": 0.0011583139998947445
}
}
},
"UnityEnvironment.step": {
"total": 0.04587353199985955,
"count": 1,
"is_parallel": true,
"self": 0.000542655999879571,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048216499999398366,
"count": 1,
"is_parallel": true,
"self": 0.00048216499999398366
},
"communicator.exchange": {
"total": 0.04302855700007058,
"count": 1,
"is_parallel": true,
"self": 0.04302855700007058
},
"steps_from_proto": {
"total": 0.0018201539999154193,
"count": 1,
"is_parallel": true,
"self": 0.0004296909999084164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013904630000070028,
"count": 8,
"is_parallel": true,
"self": 0.0013904630000070028
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1293.676879145004,
"count": 63734,
"is_parallel": true,
"self": 31.299189419022923,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.29726067604588,
"count": 63734,
"is_parallel": true,
"self": 24.29726067604588
},
"communicator.exchange": {
"total": 1137.6608444849367,
"count": 63734,
"is_parallel": true,
"self": 1137.6608444849367
},
"steps_from_proto": {
"total": 100.41958456499856,
"count": 63734,
"is_parallel": true,
"self": 20.582359416961935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.83722514803662,
"count": 509872,
"is_parallel": true,
"self": 79.83722514803662
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.8693174349758,
"count": 63735,
"self": 2.5640535240247573,
"children": {
"process_trajectory": {
"total": 110.61265421795247,
"count": 63735,
"self": 110.41272188395237,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19993233400009558,
"count": 2,
"self": 0.19993233400009558
}
}
},
"_update_policy": {
"total": 516.6926096929985,
"count": 450,
"self": 332.1491097239741,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.54349996902442,
"count": 22830,
"self": 184.54349996902442
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2800001059076749e-06,
"count": 1,
"self": 1.2800001059076749e-06
},
"TrainerController._save_models": {
"total": 0.09812993300056405,
"count": 1,
"self": 0.0014605390006181551,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0966693939999459,
"count": 1,
"self": 0.0966693939999459
}
}
}
}
}
}
}