{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7296828627586365,
"min": 0.6193974018096924,
"max": 1.4590489864349365,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21855.4609375,
"min": 18552.19140625,
"max": 44261.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2756415605545044,
"min": -0.10698758065700531,
"max": 0.2756415605545044,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 71.66680908203125,
"min": -25.356056213378906,
"max": 71.66680908203125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.5413818359375,
"min": -0.020329132676124573,
"max": 0.6159877777099609,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 140.75927734375,
"min": -5.143270492553711,
"max": 157.07688903808594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0695317835433794,
"min": 0.06544274452200462,
"max": 0.0728235122905541,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9734449696073116,
"min": 0.502379660177996,
"max": 1.011163737014274,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.03826436714999833,
"min": 0.00013902719090190855,
"max": 0.089207041512039,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.5357011400999766,
"min": 0.0018073534817248111,
"max": 1.248898581168546,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.644547451849998e-06,
"min": 7.644547451849998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010702366432589998,
"min": 0.00010702366432589998,
"max": 0.0032601749132751,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254815000000002,
"min": 0.10254815000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356741000000002,
"min": 1.3886848,
"max": 2.4017630000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000264560185,
"min": 0.000264560185,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00370384259,
"min": 0.00370384259,
"max": 0.10869381751,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010061299428343773,
"min": 0.010061299428343773,
"max": 0.37853989005088806,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14085818827152252,
"min": 0.14085818827152252,
"max": 2.6497793197631836,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 513.7636363636364,
"min": 513.7636363636364,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28257.0,
"min": 15984.0,
"max": 32550.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1952581592581488,
"min": -1.0000000521540642,
"max": 1.1952581592581488,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 65.73919875919819,
"min": -30.99620160460472,
"max": 65.73919875919819,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1952581592581488,
"min": -1.0000000521540642,
"max": 1.1952581592581488,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 65.73919875919819,
"min": -30.99620160460472,
"max": 65.73919875919819,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0542708756359802,
"min": 0.0542708756359802,
"max": 7.359631835483015,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9848981599789113,
"min": 2.9848981599789113,
"max": 117.75410936772823,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680166582",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680168584"
},
"total": 2001.395192401,
"count": 1,
"self": 0.47524751900027695,
"children": {
"run_training.setup": {
"total": 0.1146443619998081,
"count": 1,
"self": 0.1146443619998081
},
"TrainerController.start_learning": {
"total": 2000.80530052,
"count": 1,
"self": 1.3757094799855167,
"children": {
"TrainerController._reset_env": {
"total": 7.332073390000005,
"count": 1,
"self": 7.332073390000005
},
"TrainerController.advance": {
"total": 1992.0061069540145,
"count": 63355,
"self": 1.4232795089676529,
"children": {
"env_step": {
"total": 1348.9312914870213,
"count": 63355,
"self": 1238.1125437611113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.98765652496922,
"count": 63355,
"self": 4.744134901994812,
"children": {
"TorchPolicy.evaluate": {
"total": 105.24352162297441,
"count": 62575,
"self": 105.24352162297441
}
}
},
"workers": {
"total": 0.8310912009408185,
"count": 63355,
"self": 0.0,
"children": {
"worker_root": {
"total": 1996.2505001488926,
"count": 63355,
"is_parallel": true,
"self": 873.8105875758215,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017549110000345536,
"count": 1,
"is_parallel": true,
"self": 0.0005842159996518603,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011706950003826933,
"count": 8,
"is_parallel": true,
"self": 0.0011706950003826933
}
}
},
"UnityEnvironment.step": {
"total": 0.04390658399984204,
"count": 1,
"is_parallel": true,
"self": 0.000513723000040045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005156709999027953,
"count": 1,
"is_parallel": true,
"self": 0.0005156709999027953
},
"communicator.exchange": {
"total": 0.04121877299985499,
"count": 1,
"is_parallel": true,
"self": 0.04121877299985499
},
"steps_from_proto": {
"total": 0.00165841700004421,
"count": 1,
"is_parallel": true,
"self": 0.0003459750003003137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013124419997438963,
"count": 8,
"is_parallel": true,
"self": 0.0013124419997438963
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1122.439912573071,
"count": 63354,
"is_parallel": true,
"self": 31.587443143185965,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.742414693914498,
"count": 63354,
"is_parallel": true,
"self": 23.742414693914498
},
"communicator.exchange": {
"total": 974.281145346979,
"count": 63354,
"is_parallel": true,
"self": 974.281145346979
},
"steps_from_proto": {
"total": 92.82890938899163,
"count": 63354,
"is_parallel": true,
"self": 19.665833068762367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.16307632022927,
"count": 506832,
"is_parallel": true,
"self": 73.16307632022927
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 641.6515359580255,
"count": 63355,
"self": 2.533428097114893,
"children": {
"process_trajectory": {
"total": 117.9240082699107,
"count": 63355,
"self": 117.72659769191068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19741057800001727,
"count": 2,
"self": 0.19741057800001727
}
}
},
"_update_policy": {
"total": 521.1940995909999,
"count": 445,
"self": 333.13917218700135,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.0549274039986,
"count": 22791,
"self": 188.0549274039986
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.410000529896934e-07,
"count": 1,
"self": 9.410000529896934e-07
},
"TrainerController._save_models": {
"total": 0.0914097549998587,
"count": 1,
"self": 0.0014795549996051705,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08993020000025354,
"count": 1,
"self": 0.08993020000025354
}
}
}
}
}
}
}