{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2979374825954437,
"min": 0.2979374825954437,
"max": 1.353479266166687,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8923.8232421875,
"min": 8923.8232421875,
"max": 41059.1484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6589800715446472,
"min": -0.0841200202703476,
"max": 0.8067672848701477,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 187.1503448486328,
"min": -20.357044219970703,
"max": 240.41665649414062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009829412214457989,
"min": -0.0029439840000122786,
"max": 0.42868685722351074,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.791553020477295,
"min": -0.8125395774841309,
"max": 101.59878540039062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07104022711046995,
"min": 0.06149924693874911,
"max": 0.0727027391521482,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9945631795465792,
"min": 0.5089191740650374,
"max": 1.0827229021963956,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015845057009539146,
"min": 0.0010237893431240353,
"max": 0.016853429538535972,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22183079813354803,
"min": 0.014333050803736494,
"max": 0.25028837740925763,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4141475286499965e-06,
"min": 7.4141475286499965e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010379806540109995,
"min": 0.00010379806540109995,
"max": 0.0036332239889254,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247134999999999,
"min": 0.10247134999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345989,
"min": 1.3886848,
"max": 2.6110746,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025688786499999987,
"min": 0.00025688786499999987,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035964301099999985,
"min": 0.0035964301099999985,
"max": 0.12112635254000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013259338214993477,
"min": 0.013259338214993477,
"max": 0.5204009413719177,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18563073873519897,
"min": 0.18563073873519897,
"max": 3.6428065299987793,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 308.3854166666667,
"min": 241.23140495867767,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29605.0,
"min": 15984.0,
"max": 32643.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6499374809985359,
"min": -1.0000000521540642,
"max": 1.7587685816544147,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 158.39399817585945,
"min": -29.930401623249054,
"max": 213.73399782180786,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6499374809985359,
"min": -1.0000000521540642,
"max": 1.7587685816544147,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 158.39399817585945,
"min": -29.930401623249054,
"max": 213.73399782180786,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0427888337035256,
"min": 0.03559912410748956,
"max": 10.227979252114892,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.107728035538457,
"min": 3.880304527716362,
"max": 163.64766803383827,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677131928",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677134252"
},
"total": 2324.021119464,
"count": 1,
"self": 0.4754692229998909,
"children": {
"run_training.setup": {
"total": 0.10628709000002345,
"count": 1,
"self": 0.10628709000002345
},
"TrainerController.start_learning": {
"total": 2323.439363151,
"count": 1,
"self": 1.2986382010622037,
"children": {
"TrainerController._reset_env": {
"total": 6.367014501000085,
"count": 1,
"self": 6.367014501000085
},
"TrainerController.advance": {
"total": 2315.6921278049367,
"count": 64469,
"self": 1.361889323019568,
"children": {
"env_step": {
"total": 1579.1205765900531,
"count": 64469,
"self": 1470.2945965080198,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.03527264694912,
"count": 64469,
"self": 4.512235577886258,
"children": {
"TorchPolicy.evaluate": {
"total": 103.52303706906287,
"count": 62565,
"self": 35.15671117405691,
"children": {
"TorchPolicy.sample_actions": {
"total": 68.36632589500596,
"count": 62565,
"self": 68.36632589500596
}
}
}
}
},
"workers": {
"total": 0.7907074350841867,
"count": 64469,
"self": 0.0,
"children": {
"worker_root": {
"total": 2318.9195701641047,
"count": 64469,
"is_parallel": true,
"self": 959.2289380551701,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018625559996507945,
"count": 1,
"is_parallel": true,
"self": 0.0006629429999520653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011996129996987293,
"count": 8,
"is_parallel": true,
"self": 0.0011996129996987293
}
}
},
"UnityEnvironment.step": {
"total": 0.07499156300036702,
"count": 1,
"is_parallel": true,
"self": 0.0005017980001866817,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005546930001401051,
"count": 1,
"is_parallel": true,
"self": 0.0005546930001401051
},
"communicator.exchange": {
"total": 0.0721281410001211,
"count": 1,
"is_parallel": true,
"self": 0.0721281410001211
},
"steps_from_proto": {
"total": 0.0018069309999191319,
"count": 1,
"is_parallel": true,
"self": 0.00041499399912936497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001391937000789767,
"count": 8,
"is_parallel": true,
"self": 0.001391937000789767
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1359.6906321089346,
"count": 64468,
"is_parallel": true,
"self": 30.82056007289566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.361311980982464,
"count": 64468,
"is_parallel": true,
"self": 22.361311980982464
},
"communicator.exchange": {
"total": 1217.177592949031,
"count": 64468,
"is_parallel": true,
"self": 1217.177592949031
},
"steps_from_proto": {
"total": 89.33116710602553,
"count": 64468,
"is_parallel": true,
"self": 21.020533225916097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.31063388010944,
"count": 515744,
"is_parallel": true,
"self": 68.31063388010944
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 735.209661891864,
"count": 64469,
"self": 2.449281399875872,
"children": {
"process_trajectory": {
"total": 160.50569505398698,
"count": 64469,
"self": 160.31626434898726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18943070499972237,
"count": 2,
"self": 0.18943070499972237
}
}
},
"_update_policy": {
"total": 572.2546854380012,
"count": 456,
"self": 221.58734638099577,
"children": {
"TorchPPOOptimizer.update": {
"total": 350.6673390570054,
"count": 22833,
"self": 350.6673390570054
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.630005711689591e-07,
"count": 1,
"self": 7.630005711689591e-07
},
"TrainerController._save_models": {
"total": 0.08158188100060215,
"count": 1,
"self": 0.0014653690004706732,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08011651200013148,
"count": 1,
"self": 0.08011651200013148
}
}
}
}
}
}
}