ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4551127254962921,
"min": 0.42451587319374084,
"max": 1.4681589603424072,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13704.3544921875,
"min": 12674.345703125,
"max": 44538.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989997.0,
"min": 29985.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989997.0,
"min": 29985.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3583263158798218,
"min": -0.12268036603927612,
"max": 0.3583263158798218,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 94.23982238769531,
"min": -29.565967559814453,
"max": 94.23982238769531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.1676287055015564,
"min": -0.002137561794370413,
"max": 0.39246413111686707,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 44.08634948730469,
"min": -0.5386655926704407,
"max": 93.40646362304688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06978873842745088,
"min": 0.06415673648128502,
"max": 0.07353385191934812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0468310764117632,
"min": 0.5692309833047722,
"max": 1.0561341104319084,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015858215509150696,
"min": 0.00012148027873194602,
"max": 0.015858215509150696,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23787323263726043,
"min": 0.0014577633447833522,
"max": 0.23787323263726043,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.631801604024654e-06,
"min": 7.631801604024654e-06,
"max": 0.0002948532625851449,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011447702406036982,
"min": 0.00011447702406036982,
"max": 0.0033320345720804596,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254390138264204,
"min": 0.10254390138264204,
"max": 0.19828442028985507,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5381585207396304,
"min": 1.4785644177911046,
"max": 2.317018990504748,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002641357481259371,
"min": 0.0002641357481259371,
"max": 0.009828613586956522,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003962036221889056,
"min": 0.003962036221889056,
"max": 0.11107674827586207,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009732798673212528,
"min": 0.009732798673212528,
"max": 0.2895022928714752,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1459919810295105,
"min": 0.1374702751636505,
"max": 2.3160183429718018,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 447.44262295081967,
"min": 447.44262295081967,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27294.0,
"min": 16400.0,
"max": 32506.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3557803010354277,
"min": -0.9999000514547031,
"max": 1.3557803010354277,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 82.70259836316109,
"min": -31.99600164592266,
"max": 82.70259836316109,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3557803010354277,
"min": -0.9999000514547031,
"max": 1.3557803010354277,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 82.70259836316109,
"min": -31.99600164592266,
"max": 82.70259836316109,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04500335845027546,
"min": 0.04500335845027546,
"max": 6.013633237165563,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7452048654668033,
"min": 2.7452048654668033,
"max": 102.23176503181458,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679256223",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679258261"
},
"total": 2037.39379102,
"count": 1,
"self": 1.0532449780000661,
"children": {
"run_training.setup": {
"total": 0.10305628800006161,
"count": 1,
"self": 0.10305628800006161
},
"TrainerController.start_learning": {
"total": 2036.2374897539999,
"count": 1,
"self": 1.344994569973096,
"children": {
"TrainerController._reset_env": {
"total": 7.089526703000047,
"count": 1,
"self": 7.089526703000047
},
"TrainerController.advance": {
"total": 2027.6834260040268,
"count": 63367,
"self": 1.5168456190688175,
"children": {
"env_step": {
"total": 1405.5409332989761,
"count": 63367,
"self": 1294.0317847089339,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.67039413304451,
"count": 63367,
"self": 4.935321253107759,
"children": {
"TorchPolicy.evaluate": {
"total": 105.73507287993675,
"count": 62590,
"self": 105.73507287993675
}
}
},
"workers": {
"total": 0.8387544569977763,
"count": 63367,
"self": 0.0,
"children": {
"worker_root": {
"total": 2031.5918928349324,
"count": 63367,
"is_parallel": true,
"self": 857.9520871939576,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002019345999997313,
"count": 1,
"is_parallel": true,
"self": 0.0006430669998280791,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013762790001692338,
"count": 8,
"is_parallel": true,
"self": 0.0013762790001692338
}
}
},
"UnityEnvironment.step": {
"total": 0.046445686999959435,
"count": 1,
"is_parallel": true,
"self": 0.0005046019998644624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045528000009653624,
"count": 1,
"is_parallel": true,
"self": 0.00045528000009653624
},
"communicator.exchange": {
"total": 0.04385661500009519,
"count": 1,
"is_parallel": true,
"self": 0.04385661500009519
},
"steps_from_proto": {
"total": 0.0016291899999032466,
"count": 1,
"is_parallel": true,
"self": 0.0003568079996512097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001272382000252037,
"count": 8,
"is_parallel": true,
"self": 0.001272382000252037
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.6398056409748,
"count": 63366,
"is_parallel": true,
"self": 31.26444371182606,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.075154972045766,
"count": 63366,
"is_parallel": true,
"self": 24.075154972045766
},
"communicator.exchange": {
"total": 1022.0349384380486,
"count": 63366,
"is_parallel": true,
"self": 1022.0349384380486
},
"steps_from_proto": {
"total": 96.26526851905442,
"count": 63366,
"is_parallel": true,
"self": 20.737528234216597,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.52774028483782,
"count": 506928,
"is_parallel": true,
"self": 75.52774028483782
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 620.6256470859819,
"count": 63367,
"self": 2.4029402969615603,
"children": {
"process_trajectory": {
"total": 118.09035308102307,
"count": 63367,
"self": 117.80228540202302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2880676790000507,
"count": 2,
"self": 0.2880676790000507
}
}
},
"_update_policy": {
"total": 500.13235370799725,
"count": 436,
"self": 318.04088598905605,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.0914677189412,
"count": 22851,
"self": 182.0914677189412
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.514999884326244e-06,
"count": 1,
"self": 5.514999884326244e-06
},
"TrainerController._save_models": {
"total": 0.11953696199998376,
"count": 1,
"self": 0.002596451000044908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11694051099993885,
"count": 1,
"self": 0.11694051099993885
}
}
}
}
}
}
}
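
For reference, a minimal sketch (not part of the logged artifact) of how a timers.json like the one above can be inspected with the Python standard library. The relative file path is an assumption; the field meanings follow the structure visible above: each gauge carries value/min/max/count, and the timer tree nests blocks under "children" with "total", "self", and "count".

    # Minimal sketch, assuming the file sits at run_logs/timers.json.
    import json

    with open("run_logs/timers.json") as f:  # assumed location of this file
        timers = json.load(f)

    # Each gauge records the latest value plus min/max over `count` summary periods.
    for name, gauge in timers["gauges"].items():
        print(f"{name}: value={gauge['value']:.4f} "
              f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

    # The timer tree nests blocks under "children"; "total" is wall-clock seconds
    # spent in a block, while "self" excludes time attributed to its children.
    def walk(name, node, depth=0):
        print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
              f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
        for child_name, child in node.get("children", {}).items():
            walk(child_name, child, depth + 1)

    walk(timers.get("name", "root"), timers)

Run against this file, the gauge loop prints lines such as the Pyramids.Environment.CumulativeReward.mean entry, and the tree walk shows that most wall-clock time sits under TrainerController.advance (env_step plus trainer_advance).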