testpyramidsrnd/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14868123829364777,
"min": 0.14278927445411682,
"max": 1.4375250339508057,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4469.95263671875,
"min": 4265.4013671875,
"max": 43608.7578125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999938.0,
"min": 29988.0,
"max": 2999938.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999938.0,
"min": 29988.0,
"max": 2999938.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7365698218345642,
"min": -0.08463963866233826,
"max": 0.8338395357131958,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 219.497802734375,
"min": -20.567432403564453,
"max": 251.8195343017578,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00500042038038373,
"min": -0.01647786609828472,
"max": 0.5603428483009338,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.490125298500061,
"min": -4.399590492248535,
"max": 133.36160278320312,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07049743700744805,
"min": 0.06367183275287971,
"max": 0.0747115439278007,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9869641181042728,
"min": 0.522980807494605,
"max": 1.1020801744404405,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014860914643837273,
"min": 0.000593786594942078,
"max": 0.015822611126558122,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2080528050137218,
"min": 0.0077192257342470144,
"max": 0.23535000124270572,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4773066504547604e-06,
"min": 1.4773066504547604e-06,
"max": 0.0002984122862435238,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0682293106366645e-05,
"min": 2.0682293106366645e-05,
"max": 0.004027453257515633,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049240238095239,
"min": 0.10049240238095239,
"max": 0.19947076190476193,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4068936333333335,
"min": 1.3962953333333334,
"max": 2.8424843666666666,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9190997857142815e-05,
"min": 5.9190997857142815e-05,
"max": 0.009947129114285714,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008286739699999994,
"min": 0.0008286739699999994,
"max": 0.13426418823,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0073387534357607365,
"min": 0.007307820487767458,
"max": 0.48674672842025757,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10274254530668259,
"min": 0.10230948776006699,
"max": 3.407227039337158,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 247.41538461538462,
"min": 215.57664233576642,
"max": 991.375,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32164.0,
"min": 16659.0,
"max": 33161.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7064230631177242,
"min": -0.9297563012223691,
"max": 1.781681466323358,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 221.83499820530415,
"min": -29.75220163911581,
"max": 240.52699795365334,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7064230631177242,
"min": -0.9297563012223691,
"max": 1.781681466323358,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 221.83499820530415,
"min": -29.75220163911581,
"max": 240.52699795365334,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.018664570184773764,
"min": 0.018032757156013587,
"max": 9.429891756352257,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4263941240205895,
"min": 2.2088452815805795,
"max": 160.30815985798836,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1658387789",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1658394858"
},
"total": 7068.634116802,
"count": 1,
"self": 0.4831957759997749,
"children": {
"run_training.setup": {
"total": 0.04397284499998477,
"count": 1,
"self": 0.04397284499998477
},
"TrainerController.start_learning": {
"total": 7068.106948181,
"count": 1,
"self": 5.0024704420620765,
"children": {
"TrainerController._reset_env": {
"total": 10.28858216399999,
"count": 1,
"self": 10.28858216399999
},
"TrainerController.advance": {
"total": 7052.715241339937,
"count": 195278,
"self": 5.183453045045098,
"children": {
"env_step": {
"total": 4870.985535174061,
"count": 195278,
"self": 4512.790199598755,
"children": {
"SubprocessEnvManager._take_step": {
"total": 355.49449856123,
"count": 195278,
"self": 15.146295695211109,
"children": {
"TorchPolicy.evaluate": {
"total": 340.3482028660189,
"count": 187554,
"self": 113.89345735982926,
"children": {
"TorchPolicy.sample_actions": {
"total": 226.45474550618962,
"count": 187554,
"self": 226.45474550618962
}
}
}
}
},
"workers": {
"total": 2.700837014075603,
"count": 195278,
"self": 0.0,
"children": {
"worker_root": {
"total": 7055.2340582870265,
"count": 195278,
"is_parallel": true,
"self": 2882.724902824765,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0052520620000109375,
"count": 1,
"is_parallel": true,
"self": 0.0038943510000422066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001357710999968731,
"count": 8,
"is_parallel": true,
"self": 0.001357710999968731
}
}
},
"UnityEnvironment.step": {
"total": 0.0499717679999776,
"count": 1,
"is_parallel": true,
"self": 0.0005136000000334207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004906389999632665,
"count": 1,
"is_parallel": true,
"self": 0.0004906389999632665
},
"communicator.exchange": {
"total": 0.04719671699996297,
"count": 1,
"is_parallel": true,
"self": 0.04719671699996297
},
"steps_from_proto": {
"total": 0.0017708120000179406,
"count": 1,
"is_parallel": true,
"self": 0.0004883540000264475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012824579999914931,
"count": 8,
"is_parallel": true,
"self": 0.0012824579999914931
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4172.509155462261,
"count": 195277,
"is_parallel": true,
"self": 87.81476495297375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.93956073603164,
"count": 195277,
"is_parallel": true,
"self": 76.93956073603164
},
"communicator.exchange": {
"total": 3704.806107897176,
"count": 195277,
"is_parallel": true,
"self": 3704.806107897176
},
"steps_from_proto": {
"total": 302.9487218760802,
"count": 195277,
"is_parallel": true,
"self": 77.9510243886711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 224.99769748740908,
"count": 1562216,
"is_parallel": true,
"self": 224.99769748740908
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2176.5462531208314,
"count": 195278,
"self": 9.451557499878618,
"children": {
"process_trajectory": {
"total": 526.2340674749619,
"count": 195278,
"self": 525.6102825009621,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6237849739998182,
"count": 6,
"self": 0.6237849739998182
}
}
},
"_update_policy": {
"total": 1640.860628145991,
"count": 1403,
"self": 634.9846306869811,
"children": {
"TorchPPOOptimizer.update": {
"total": 1005.87599745901,
"count": 68388,
"self": 1005.87599745901
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2059999789926223e-06,
"count": 1,
"self": 1.2059999789926223e-06
},
"TrainerController._save_models": {
"total": 0.1006530290005685,
"count": 1,
"self": 0.001801230000637588,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0988517989999309,
"count": 1,
"self": 0.0988517989999309
}
}
}
}
}
}
}