{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40224698185920715,
"min": 0.40224698185920715,
"max": 1.4440940618515015,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12048.1015625,
"min": 12048.1015625,
"max": 43808.0390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989991.0,
"min": 29952.0,
"max": 989991.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989991.0,
"min": 29952.0,
"max": 989991.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1403101533651352,
"min": -0.13922427594661713,
"max": 0.1403101533651352,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 35.498470306396484,
"min": -32.99615478515625,
"max": 35.498470306396484,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006773791275918484,
"min": -0.003155412385240197,
"max": 0.19336429238319397,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7137691974639893,
"min": -0.7762314677238464,
"max": 46.60079574584961,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.069441738889402,
"min": 0.0649410750140473,
"max": 0.07404260663066427,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9721843444516279,
"min": 0.4930547936041719,
"max": 1.0351920670946129,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009450458332883434,
"min": 0.0005877843303807379,
"max": 0.009450458332883434,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13230641666036808,
"min": 0.008228980625330331,
"max": 0.13230641666036808,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5322832035571495e-06,
"min": 7.5322832035571495e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010545196484980009,
"min": 0.00010545196484980009,
"max": 0.0036335452888182993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251072857142855,
"min": 0.10251072857142855,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351501999999998,
"min": 1.3886848,
"max": 2.6111817,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002608217842857146,
"min": 0.0002608217842857146,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003651504980000004,
"min": 0.003651504980000004,
"max": 0.12113705182999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013618141412734985,
"min": 0.01267623994499445,
"max": 0.3824021816253662,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1906539797782898,
"min": 0.18613320589065552,
"max": 2.6768152713775635,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 630.6521739130435,
"min": 630.6521739130435,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29010.0,
"min": 15984.0,
"max": 32779.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7551377536522017,
"min": -1.0000000521540642,
"max": 0.7551377536522017,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 33.98119891434908,
"min": -30.296201653778553,
"max": 33.98119891434908,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7551377536522017,
"min": -1.0000000521540642,
"max": 0.7551377536522017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 33.98119891434908,
"min": -30.296201653778553,
"max": 33.98119891434908,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08766814472619444,
"min": 0.08766814472619444,
"max": 7.7779965894296765,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.94506651267875,
"min": 3.830692776129581,
"max": 124.44794543087482,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674121737",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674123831"
},
"total": 2093.518490296,
"count": 1,
"self": 0.49521606100006466,
"children": {
"run_training.setup": {
"total": 0.09976056200002859,
"count": 1,
"self": 0.09976056200002859
},
"TrainerController.start_learning": {
"total": 2092.923513673,
"count": 1,
"self": 1.5429471820812068,
"children": {
"TrainerController._reset_env": {
"total": 13.736347672000193,
"count": 1,
"self": 13.736347672000193
},
"TrainerController.advance": {
"total": 2077.5507914259188,
"count": 63257,
"self": 1.5786719479056046,
"children": {
"env_step": {
"total": 1414.5920317610512,
"count": 63257,
"self": 1294.5667984419856,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.05207096897948,
"count": 63257,
"self": 4.683615309970264,
"children": {
"TorchPolicy.evaluate": {
"total": 114.36845565900921,
"count": 62556,
"self": 37.836475124995104,
"children": {
"TorchPolicy.sample_actions": {
"total": 76.53198053401411,
"count": 62556,
"self": 76.53198053401411
}
}
}
}
},
"workers": {
"total": 0.9731623500861133,
"count": 63257,
"self": 0.0,
"children": {
"worker_root": {
"total": 2087.7769159260133,
"count": 63257,
"is_parallel": true,
"self": 902.135521506028,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017834880000009434,
"count": 1,
"is_parallel": true,
"self": 0.0006232750001800014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001160212999820942,
"count": 8,
"is_parallel": true,
"self": 0.001160212999820942
}
}
},
"UnityEnvironment.step": {
"total": 0.044435861999772897,
"count": 1,
"is_parallel": true,
"self": 0.0004601699993145303,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043730000015784753,
"count": 1,
"is_parallel": true,
"self": 0.00043730000015784753
},
"communicator.exchange": {
"total": 0.04194584200013196,
"count": 1,
"is_parallel": true,
"self": 0.04194584200013196
},
"steps_from_proto": {
"total": 0.0015925500001685577,
"count": 1,
"is_parallel": true,
"self": 0.0004060200008098036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011865299993587541,
"count": 8,
"is_parallel": true,
"self": 0.0011865299993587541
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1185.6413944199853,
"count": 63256,
"is_parallel": true,
"self": 28.711528097953305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.32477447205747,
"count": 63256,
"is_parallel": true,
"self": 22.32477447205747
},
"communicator.exchange": {
"total": 1032.1452526740413,
"count": 63256,
"is_parallel": true,
"self": 1032.1452526740413
},
"steps_from_proto": {
"total": 102.45983917593321,
"count": 63256,
"is_parallel": true,
"self": 23.302325955316064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.15751322061715,
"count": 506048,
"is_parallel": true,
"self": 79.15751322061715
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 661.380087716962,
"count": 63257,
"self": 2.9075301539346583,
"children": {
"process_trajectory": {
"total": 145.5607109790244,
"count": 63257,
"self": 145.33582122402368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22488975500073138,
"count": 2,
"self": 0.22488975500073138
}
}
},
"_update_policy": {
"total": 512.9118465840029,
"count": 452,
"self": 189.4668510729648,
"children": {
"TorchPPOOptimizer.update": {
"total": 323.4449955110381,
"count": 22791,
"self": 323.4449955110381
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.819997674436308e-07,
"count": 1,
"self": 8.819997674436308e-07
},
"TrainerController._save_models": {
"total": 0.09342651099996147,
"count": 1,
"self": 0.0016105300001072465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09181598099985422,
"count": 1,
"self": 0.09181598099985422
}
}
}
}
}
}
}