{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3236592411994934,
"min": 0.31995895504951477,
"max": 1.3804181814193726,
"count": 52
},
"Pyramids.Policy.Entropy.sum": {
"value": 9683.884765625,
"min": 9506.6201171875,
"max": 41876.3671875,
"count": 52
},
"Pyramids.Step.mean": {
"value": 1559957.0,
"min": 29952.0,
"max": 1559957.0,
"count": 52
},
"Pyramids.Step.sum": {
"value": 1559957.0,
"min": 29952.0,
"max": 1559957.0,
"count": 52
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8009302616119385,
"min": -0.09742318838834763,
"max": 0.8087341785430908,
"count": 52
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 233.87164306640625,
"min": -23.38156509399414,
"max": 244.2377166748047,
"count": 52
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0003482537576928735,
"min": -0.03078695759177208,
"max": 0.37484097480773926,
"count": 52
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.10169009864330292,
"min": -8.74349594116211,
"max": 88.83731079101562,
"count": 52
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06909934650632618,
"min": 0.06641634348073108,
"max": 0.07306884105748715,
"count": 52
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9673908510885666,
"min": 0.47176336469219043,
"max": 1.0664764575143206,
"count": 52
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012188718730439653,
"min": 0.00019509648938030314,
"max": 0.01527911775753767,
"count": 52
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17064206222615513,
"min": 0.002536254361943941,
"max": 0.21390764860552738,
"count": 52
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00014556713004906905,
"min": 0.00014556713004906905,
"max": 0.00029838354339596195,
"count": 52
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0020379398206869666,
"min": 0.0020379398206869666,
"max": 0.0038427845190718663,
"count": 52
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14852235952380952,
"min": 0.14852235952380952,
"max": 0.19946118095238097,
"count": 52
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0793130333333334,
"min": 1.3962282666666668,
"max": 2.767491266666667,
"count": 52
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004857383716428572,
"min": 0.004857383716428572,
"max": 0.009946171977142856,
"count": 52
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06800337203000001,
"min": 0.06800337203000001,
"max": 0.12810472052,
"count": 52
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00605732062831521,
"min": 0.005942605435848236,
"max": 0.4936694800853729,
"count": 52
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08480248600244522,
"min": 0.0831964761018753,
"max": 3.455686330795288,
"count": 52
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 237.98333333333332,
"min": 231.1451612903226,
"max": 999.0,
"count": 52
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28558.0,
"min": 15984.0,
"max": 33332.0,
"count": 52
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7618760199339922,
"min": -1.0000000521540642,
"max": 1.768854827890473,
"count": 52
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 213.18699841201305,
"min": -31.995201662182808,
"max": 223.49099791795015,
"count": 52
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7618760199339922,
"min": -1.0000000521540642,
"max": 1.768854827890473,
"count": 52
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 213.18699841201305,
"min": -31.995201662182808,
"max": 223.49099791795015,
"count": 52
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01529563271776342,
"min": 0.01529563271776342,
"max": 10.223189648240805,
"count": 52
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8507715588493738,
"min": 1.8470768433471676,
"max": 163.57103437185287,
"count": 52
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 52
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 52
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678910463",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678915948"
},
"total": 5485.102352989999,
"count": 1,
"self": 0.6813879429992085,
"children": {
"run_training.setup": {
"total": 0.1417197219998343,
"count": 1,
"self": 0.1417197219998343
},
"TrainerController.start_learning": {
"total": 5484.279245325,
"count": 1,
"self": 3.703674904444597,
"children": {
"TrainerController._reset_env": {
"total": 4.320587345000604,
"count": 1,
"self": 4.320587345000604
},
"TrainerController.advance": {
"total": 5476.080144606557,
"count": 102292,
"self": 3.9171571713632147,
"children": {
"env_step": {
"total": 3705.149352992212,
"count": 102292,
"self": 3483.2920734153404,
"children": {
"SubprocessEnvManager._take_step": {
"total": 219.5124774399137,
"count": 102292,
"self": 11.201043181738896,
"children": {
"TorchPolicy.evaluate": {
"total": 208.3114342581748,
"count": 99048,
"self": 208.3114342581748
}
}
},
"workers": {
"total": 2.3448021369576963,
"count": 102291,
"self": 0.0,
"children": {
"worker_root": {
"total": 5472.538342261794,
"count": 102291,
"is_parallel": true,
"self": 2270.499509283867,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027955379991908558,
"count": 1,
"is_parallel": true,
"self": 0.0009510059971944429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018445320019964129,
"count": 8,
"is_parallel": true,
"self": 0.0018445320019964129
}
}
},
"UnityEnvironment.step": {
"total": 0.06343255399951886,
"count": 1,
"is_parallel": true,
"self": 0.0006612509996557492,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005331409993232228,
"count": 1,
"is_parallel": true,
"self": 0.0005331409993232228
},
"communicator.exchange": {
"total": 0.06008661100077006,
"count": 1,
"is_parallel": true,
"self": 0.06008661100077006
},
"steps_from_proto": {
"total": 0.0021515509997698246,
"count": 1,
"is_parallel": true,
"self": 0.0004731979997814051,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016783529999884195,
"count": 8,
"is_parallel": true,
"self": 0.0016783529999884195
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3202.038832977927,
"count": 102290,
"is_parallel": true,
"self": 68.86226591511513,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 41.04949926395693,
"count": 102290,
"is_parallel": true,
"self": 41.04949926395693
},
"communicator.exchange": {
"total": 2886.3831351125964,
"count": 102290,
"is_parallel": true,
"self": 2886.3831351125964
},
"steps_from_proto": {
"total": 205.74393268625863,
"count": 102290,
"is_parallel": true,
"self": 47.31556353067117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 158.42836915558746,
"count": 818320,
"is_parallel": true,
"self": 158.42836915558746
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1767.013634442982,
"count": 102291,
"self": 7.242888563813722,
"children": {
"process_trajectory": {
"total": 252.20586099115735,
"count": 102291,
"self": 251.7923840211579,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4134769699994649,
"count": 3,
"self": 0.4134769699994649
}
}
},
"_update_policy": {
"total": 1507.564884888011,
"count": 725,
"self": 624.359065966064,
"children": {
"TorchPPOOptimizer.update": {
"total": 883.205818921947,
"count": 36087,
"self": 883.205818921947
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9139988580718637e-06,
"count": 1,
"self": 1.9139988580718637e-06
},
"TrainerController._save_models": {
"total": 0.17483655499927409,
"count": 1,
"self": 0.002435280999634415,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17240127399963967,
"count": 1,
"self": 0.17240127399963967
}
}
}
}
}
}
}