ppo-Pyramids / run_logs / timers.json
Yureeh's picture
First Push
9e22621
raw
history blame contribute delete
No virus
18.8 kB
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1545298844575882,
"min": 0.14487138390541077,
"max": 1.4478065967559814,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4705.1259765625,
"min": 4288.19287109375,
"max": 43920.66015625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999962.0,
"min": 29952.0,
"max": 2999962.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999962.0,
"min": 29952.0,
"max": 2999962.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8651622533798218,
"min": -0.10480409860610962,
"max": 0.8841119408607483,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 266.469970703125,
"min": -25.467395782470703,
"max": 268.2238464355469,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01823974959552288,
"min": -0.007011605426669121,
"max": 0.5194798707962036,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.617843151092529,
"min": -2.075435161590576,
"max": 124.51943969726562,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06750972331653993,
"min": 0.06440509073612351,
"max": 0.07405085455199846,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.945136126431559,
"min": 0.4923244847159367,
"max": 1.0830642637059402,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015388585903565401,
"min": 6.453619929498871e-05,
"max": 0.018165371629403404,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21544020264991562,
"min": 0.000903506790129842,
"max": 0.27248057444105106,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.523613777876188e-06,
"min": 1.523613777876188e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1330592890266634e-05,
"min": 2.1330592890266634e-05,
"max": 0.004010899663033466,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1005078380952381,
"min": 0.1005078380952381,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4071097333333333,
"min": 1.3962282666666668,
"max": 2.7369665333333337,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.073302571428565e-05,
"min": 6.073302571428565e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008502623599999991,
"min": 0.0008502623599999991,
"max": 0.13370295668,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00838124193251133,
"min": 0.00811163242906332,
"max": 0.722732663154602,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11733739078044891,
"min": 0.11356285959482193,
"max": 5.059128761291504,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 223.46478873239437,
"min": 204.40845070422534,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31732.0,
"min": 15984.0,
"max": 33087.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7483605501097692,
"min": -1.0000000521540642,
"max": 1.7955915334988648,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 248.26719811558723,
"min": -29.98920151591301,
"max": 254.9739977568388,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7483605501097692,
"min": -1.0000000521540642,
"max": 1.7955915334988648,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 248.26719811558723,
"min": -29.98920151591301,
"max": 254.9739977568388,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.019456846356673926,
"min": 0.0181169858991216,
"max": 15.741784621030092,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7628721826476976,
"min": 2.37493149486545,
"max": 251.86855393648148,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679934652",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679941726"
},
"total": 7074.304308379,
"count": 1,
"self": 0.4914355529999739,
"children": {
"run_training.setup": {
"total": 0.12111765200000946,
"count": 1,
"self": 0.12111765200000946
},
"TrainerController.start_learning": {
"total": 7073.691755174,
"count": 1,
"self": 4.465371082887032,
"children": {
"TrainerController._reset_env": {
"total": 10.242906995999988,
"count": 1,
"self": 10.242906995999988
},
"TrainerController.advance": {
"total": 7058.888956403113,
"count": 194958,
"self": 4.593879521400595,
"children": {
"env_step": {
"total": 5199.395420368696,
"count": 194958,
"self": 4866.331728341763,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.3681129019975,
"count": 194958,
"self": 14.356998805771184,
"children": {
"TorchPolicy.evaluate": {
"total": 316.0111140962263,
"count": 187568,
"self": 316.0111140962263
}
}
},
"workers": {
"total": 2.6955791249361027,
"count": 194958,
"self": 0.0,
"children": {
"worker_root": {
"total": 7058.669024681076,
"count": 194958,
"is_parallel": true,
"self": 2543.384661940063,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005008815999985927,
"count": 1,
"is_parallel": true,
"self": 0.0036000399999238653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001408776000062062,
"count": 8,
"is_parallel": true,
"self": 0.001408776000062062
}
}
},
"UnityEnvironment.step": {
"total": 0.04529550300003393,
"count": 1,
"is_parallel": true,
"self": 0.0005295560000604382,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004406209999956445,
"count": 1,
"is_parallel": true,
"self": 0.0004406209999956445
},
"communicator.exchange": {
"total": 0.04278591000002052,
"count": 1,
"is_parallel": true,
"self": 0.04278591000002052
},
"steps_from_proto": {
"total": 0.0015394159999573276,
"count": 1,
"is_parallel": true,
"self": 0.00034641199999896344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011930039999583641,
"count": 8,
"is_parallel": true,
"self": 0.0011930039999583641
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4515.284362741013,
"count": 194957,
"is_parallel": true,
"self": 93.76680633408978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 67.15718588593364,
"count": 194957,
"is_parallel": true,
"self": 67.15718588593364
},
"communicator.exchange": {
"total": 4076.0676329978705,
"count": 194957,
"is_parallel": true,
"self": 4076.0676329978705
},
"steps_from_proto": {
"total": 278.2927375231197,
"count": 194957,
"is_parallel": true,
"self": 60.10413034132222,
"children": {
"_process_rank_one_or_two_observation": {
"total": 218.18860718179747,
"count": 1559656,
"is_parallel": true,
"self": 218.18860718179747
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1854.8996565130165,
"count": 194958,
"self": 8.459839935334458,
"children": {
"process_trajectory": {
"total": 355.6127789716951,
"count": 194958,
"self": 354.90408553969485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7086934320002456,
"count": 6,
"self": 0.7086934320002456
}
}
},
"_update_policy": {
"total": 1490.827037605987,
"count": 1399,
"self": 947.1703722339637,
"children": {
"TorchPPOOptimizer.update": {
"total": 543.6566653720233,
"count": 68394,
"self": 543.6566653720233
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0119993021362461e-06,
"count": 1,
"self": 1.0119993021362461e-06
},
"TrainerController._save_models": {
"total": 0.0945196800003032,
"count": 1,
"self": 0.0013677150009243633,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09315196499937883,
"count": 1,
"self": 0.09315196499937883
}
}
}
}
}
}
}