{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4217751622200012,
"min": 0.4068375527858734,
"max": 1.47845458984375,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12801.7197265625,
"min": 11990.31640625,
"max": 44850.3984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.451963871717453,
"min": -0.17323777079582214,
"max": 0.47231289744377136,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.03024291992188,
"min": -41.057350158691406,
"max": 126.1075439453125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008978008292615414,
"min": -0.004839690867811441,
"max": 0.35996198654174805,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.4240622520446777,
"min": -1.2534799575805664,
"max": 85.31098937988281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06924364245066247,
"min": 0.06569798657291044,
"max": 0.07373504016182086,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9694109943092746,
"min": 0.49024346816800424,
"max": 1.0714295787426331,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014895434157681717,
"min": 0.0005694680698887781,
"max": 0.015097129851654584,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20853607820754405,
"min": 0.0076621221479226485,
"max": 0.2113598179231642,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.483447505550003e-06,
"min": 7.483447505550003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010476826507770005,
"min": 0.00010476826507770005,
"max": 0.0032529410156863993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249445,
"min": 0.10249445,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349223,
"min": 1.3886848,
"max": 2.4825376999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025919555500000016,
"min": 0.00025919555500000016,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036287377700000025,
"min": 0.0036287377700000025,
"max": 0.10845292863999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00949426181614399,
"min": 0.009307535365223885,
"max": 0.46683362126350403,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13291966915130615,
"min": 0.13030549883842468,
"max": 3.2678353786468506,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 415.72972972972974,
"min": 388.79487179487177,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30764.0,
"min": 15984.0,
"max": 32511.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4761161902869069,
"min": -1.0000000521540642,
"max": 1.4829563884398875,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 109.23259808123112,
"min": -30.280401691794395,
"max": 115.67059829831123,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4761161902869069,
"min": -1.0000000521540642,
"max": 1.4829563884398875,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 109.23259808123112,
"min": -30.280401691794395,
"max": 115.67059829831123,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04040351385098089,
"min": 0.04040351385098089,
"max": 9.792980520054698,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.989860024972586,
"min": 2.9346383853117004,
"max": 156.68768832087517,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688248177",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688250364"
},
"total": 2186.29032398,
"count": 1,
"self": 0.42435851399977764,
"children": {
"run_training.setup": {
"total": 0.037924023999948986,
"count": 1,
"self": 0.037924023999948986
},
"TrainerController.start_learning": {
"total": 2185.828041442,
"count": 1,
"self": 1.3484907879451384,
"children": {
"TrainerController._reset_env": {
"total": 4.051695628000061,
"count": 1,
"self": 4.051695628000061
},
"TrainerController.advance": {
"total": 2180.334058860055,
"count": 63640,
"self": 1.3587807850553872,
"children": {
"env_step": {
"total": 1549.6816942379787,
"count": 63640,
"self": 1443.9187377100927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.00415598791642,
"count": 63640,
"self": 4.585051514953875,
"children": {
"TorchPolicy.evaluate": {
"total": 100.41910447296254,
"count": 62573,
"self": 100.41910447296254
}
}
},
"workers": {
"total": 0.7588005399695703,
"count": 63640,
"self": 0.0,
"children": {
"worker_root": {
"total": 2180.8577556869714,
"count": 63640,
"is_parallel": true,
"self": 845.7583590819461,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001730210999994597,
"count": 1,
"is_parallel": true,
"self": 0.000553399999944304,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001176811000050293,
"count": 8,
"is_parallel": true,
"self": 0.001176811000050293
}
}
},
"UnityEnvironment.step": {
"total": 0.04622939899991252,
"count": 1,
"is_parallel": true,
"self": 0.0005357149998417299,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047806500015212805,
"count": 1,
"is_parallel": true,
"self": 0.00047806500015212805
},
"communicator.exchange": {
"total": 0.04335449099994548,
"count": 1,
"is_parallel": true,
"self": 0.04335449099994548
},
"steps_from_proto": {
"total": 0.0018611279999731778,
"count": 1,
"is_parallel": true,
"self": 0.0003802669996275654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014808610003456124,
"count": 8,
"is_parallel": true,
"self": 0.0014808610003456124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1335.0993966050253,
"count": 63639,
"is_parallel": true,
"self": 32.544493379963114,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.04633948601213,
"count": 63639,
"is_parallel": true,
"self": 22.04633948601213
},
"communicator.exchange": {
"total": 1182.0679232290468,
"count": 63639,
"is_parallel": true,
"self": 1182.0679232290468
},
"steps_from_proto": {
"total": 98.44064051000328,
"count": 63639,
"is_parallel": true,
"self": 19.161251931808692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.27938857819458,
"count": 509112,
"is_parallel": true,
"self": 79.27938857819458
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.2935838370206,
"count": 63640,
"self": 2.4441643889829265,
"children": {
"process_trajectory": {
"total": 104.18556769904103,
"count": 63640,
"self": 103.9859959030407,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1995717960003276,
"count": 2,
"self": 0.1995717960003276
}
}
},
"_update_policy": {
"total": 522.6638517489966,
"count": 446,
"self": 338.7466357519868,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.91721599700986,
"count": 22863,
"self": 183.91721599700986
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.159998626273591e-07,
"count": 1,
"self": 9.159998626273591e-07
},
"TrainerController._save_models": {
"total": 0.09379525000031208,
"count": 1,
"self": 0.0017884540002341964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09200679600007788,
"count": 1,
"self": 0.09200679600007788
}
}
}
}
}
}
}