{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32007652521133423,
"min": 0.3051259219646454,
"max": 1.4022296667099,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 9515.2353515625,
"min": 9111.4775390625,
"max": 42538.0390625,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979873.0,
"min": 29952.0,
"max": 1979873.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979873.0,
"min": 29952.0,
"max": 1979873.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6806299686431885,
"min": -0.17591340839862823,
"max": 0.6806299686431885,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 196.02142333984375,
"min": -42.571044921875,
"max": 196.02142333984375,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.057078585028648376,
"min": -0.1636228710412979,
"max": 0.7392935156822205,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 16.43863296508789,
"min": -42.05107879638672,
"max": 178.90902709960938,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.030852792601633285,
"min": 0.027895903783029922,
"max": 0.03879610501264608,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.431939096422866,
"min": 0.2715727350885226,
"max": 0.5005986023461445,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015173682466238026,
"min": 0.00030356136302520423,
"max": 0.023597058529655136,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21243155452733237,
"min": 0.003946297719327655,
"max": 0.3303588194151719,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.206566121653567e-06,
"min": 5.206566121653567e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.289192570314993e-05,
"min": 7.289192570314993e-05,
"max": 0.003696603367798899,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1017354892857143,
"min": 0.1017354892857143,
"max": 0.1991917714285714,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4242968500000002,
"min": 1.3943423999999998,
"max": 2.5795339,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018337537964285703,
"min": 0.00018337537964285703,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0025672553149999984,
"min": 0.0025672553149999984,
"max": 0.12322688989,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012603439390659332,
"min": 0.012603439390659332,
"max": 1.0250335931777954,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17644815146923065,
"min": 0.17644815146923065,
"max": 7.175234794616699,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 288.0761904761905,
"min": 279.44545454545454,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30248.0,
"min": 15984.0,
"max": 33534.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6928666477402052,
"min": -1.0000000521540642,
"max": 1.7067744901367263,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 177.75099801272154,
"min": -31.9904016405344,
"max": 185.6325981914997,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6928666477402052,
"min": -1.0000000521540642,
"max": 1.7067744901367263,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 177.75099801272154,
"min": -31.9904016405344,
"max": 185.6325981914997,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0367499168844439,
"min": 0.0367499168844439,
"max": 10.789079509675503,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8587412728666095,
"min": 3.7383562036557123,
"max": 305.6231389567256,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673696560",
"python_version": "3.8.8 (default, Feb 24 2021, 21:46:12) \n[GCC 7.3.0]",
"command_line_arguments": "/opt/conda/bin/mlagents-learn ./src/Pyramids_params.yaml --env=ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=2023-01-14_11-42-40 --results-dir=./src/runs/train --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1",
"numpy_version": "1.19.2",
"end_time_seconds": "1673702759"
},
"total": 6198.849485931001,
"count": 1,
"self": 0.21971711200058053,
"children": {
"run_training.setup": {
"total": 0.023255456999322632,
"count": 1,
"self": 0.023255456999322632
},
"TrainerController.start_learning": {
"total": 6198.606513362001,
"count": 1,
"self": 1.8211633099617757,
"children": {
"TrainerController._reset_env": {
"total": 1.5075983500009897,
"count": 1,
"self": 1.5075983500009897
},
"TrainerController.advance": {
"total": 6195.222886185042,
"count": 128351,
"self": 1.6776715021969721,
"children": {
"env_step": {
"total": 5365.760370755066,
"count": 128351,
"self": 5236.600336192678,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.00788976396143,
"count": 128351,
"self": 4.9586700568634114,
"children": {
"TorchPolicy.evaluate": {
"total": 123.04921970709802,
"count": 125058,
"self": 44.3349472129139,
"children": {
"TorchPolicy.sample_actions": {
"total": 78.71427249418412,
"count": 125058,
"self": 78.71427249418412
}
}
}
}
},
"workers": {
"total": 1.1521447984268889,
"count": 128351,
"self": 0.0,
"children": {
"worker_root": {
"total": 6195.741052330599,
"count": 128351,
"is_parallel": true,
"self": 1081.9337445352849,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002637784000398824,
"count": 1,
"is_parallel": true,
"self": 0.000386405001336243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002251378999062581,
"count": 8,
"is_parallel": true,
"self": 0.002251378999062581
}
}
},
"UnityEnvironment.step": {
"total": 0.07313721399987116,
"count": 1,
"is_parallel": true,
"self": 0.00011416099914640654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007473969999409746,
"count": 1,
"is_parallel": true,
"self": 0.0007473969999409746
},
"communicator.exchange": {
"total": 0.07079669199993077,
"count": 1,
"is_parallel": true,
"self": 0.07079669199993077
},
"steps_from_proto": {
"total": 0.0014789640008530114,
"count": 1,
"is_parallel": true,
"self": 0.0001723120021779323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001306651998675079,
"count": 8,
"is_parallel": true,
"self": 0.001306651998675079
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5113.807307795314,
"count": 128350,
"is_parallel": true,
"self": 16.010276674987836,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.38447206915589,
"count": 128350,
"is_parallel": true,
"self": 80.38447206915589
},
"communicator.exchange": {
"total": 4793.556476415984,
"count": 128350,
"is_parallel": true,
"self": 4793.556476415984
},
"steps_from_proto": {
"total": 223.85608263518589,
"count": 128350,
"is_parallel": true,
"self": 24.30612519753595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 199.54995743764994,
"count": 1026800,
"is_parallel": true,
"self": 199.54995743764994
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 827.7848439277786,
"count": 128351,
"self": 3.1724225597354234,
"children": {
"process_trajectory": {
"total": 228.78922193700055,
"count": 128351,
"self": 228.55384613300157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23537580399897706,
"count": 4,
"self": 0.23537580399897706
}
}
},
"_update_policy": {
"total": 595.8231994310427,
"count": 920,
"self": 333.56636655004695,
"children": {
"TorchPPOOptimizer.update": {
"total": 262.2568328809957,
"count": 11268,
"self": 262.2568328809957
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.799982656957582e-07,
"count": 1,
"self": 6.799982656957582e-07
},
"TrainerController._save_models": {
"total": 0.054864836998604005,
"count": 1,
"self": 0.0006613459991058335,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05420349099949817,
"count": 1,
"self": 0.05420349099949817
}
}
}
}
}
}
}