RL-PyramidRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42382150888442993,
"min": 0.406565397977829,
"max": 1.4391682147979736,
"count": 38
},
"Pyramids.Policy.Entropy.sum": {
"value": 12782.45703125,
"min": 12268.517578125,
"max": 43658.60546875,
"count": 38
},
"Pyramids.Step.mean": {
"value": 1139945.0,
"min": 29952.0,
"max": 1139945.0,
"count": 38
},
"Pyramids.Step.sum": {
"value": 1139945.0,
"min": 29952.0,
"max": 1139945.0,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5910009145736694,
"min": -0.19449840486049652,
"max": 0.6151347160339355,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 165.48025512695312,
"min": -46.09612274169922,
"max": 173.46798706054688,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0011892663314938545,
"min": -0.05871804058551788,
"max": 0.3398294448852539,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.33299458026885986,
"min": -15.44284439086914,
"max": 81.55906677246094,
"count": 38
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07265892177556885,
"min": 0.06522165860016164,
"max": 0.0737188521492437,
"count": 38
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0172249048579638,
"min": 0.48990747327989925,
"max": 1.0513611377876562,
"count": 38
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014149721933271004,
"min": 0.0009288423825395414,
"max": 0.016100048206642882,
"count": 38
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19809610706579406,
"min": 0.0074598522831093896,
"max": 0.22563192078201144,
"count": 38
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001874971517866762,
"min": 0.0001874971517866762,
"max": 0.00029838354339596195,
"count": 38
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.002624960125013467,
"min": 0.0020691136102954665,
"max": 0.003969440476853199,
"count": 38
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16249903809523808,
"min": 0.16249903809523808,
"max": 0.19946118095238097,
"count": 38
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.2749865333333332,
"min": 1.3897045333333333,
"max": 2.7231468000000003,
"count": 38
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006253653905714286,
"min": 0.006253653905714286,
"max": 0.009946171977142856,
"count": 38
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08755115468,
"min": 0.06897148288,
"max": 0.13232236532000002,
"count": 38
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011484497226774693,
"min": 0.011408279649913311,
"max": 0.47832992672920227,
"count": 38
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16078296303749084,
"min": 0.1597159206867218,
"max": 3.3483095169067383,
"count": 38
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 347.6847826086956,
"min": 296.52127659574467,
"max": 999.0,
"count": 38
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31987.0,
"min": 15984.0,
"max": 32916.0,
"count": 38
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5657911931911668,
"min": -1.0000000521540642,
"max": 1.68219360106803,
"count": 38
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.48699858039618,
"min": -32.000001668930054,
"max": 158.12619850039482,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5657911931911668,
"min": -1.0000000521540642,
"max": 1.68219360106803,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.48699858039618,
"min": -32.000001668930054,
"max": 158.12619850039482,
"count": 38
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.041036050013964674,
"min": 0.037623133684974164,
"max": 9.714530458673835,
"count": 38
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.7342805512707855,
"min": 3.5231679516437,
"max": 155.43248733878136,
"count": 38
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696776645",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696779127"
},
"total": 2481.259565326,
"count": 1,
"self": 0.3615020970005389,
"children": {
"run_training.setup": {
"total": 0.051631692999990264,
"count": 1,
"self": 0.051631692999990264
},
"TrainerController.start_learning": {
"total": 2480.8464315359997,
"count": 1,
"self": 1.5297279540295676,
"children": {
"TrainerController._reset_env": {
"total": 7.371571420999999,
"count": 1,
"self": 7.371571420999999
},
"TrainerController.advance": {
"total": 2471.8228511679704,
"count": 73532,
"self": 1.6264904460517755,
"children": {
"env_step": {
"total": 1702.5678217369502,
"count": 73532,
"self": 1559.2617635918562,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.3746425040416,
"count": 73532,
"self": 5.2179221330416965,
"children": {
"TorchPolicy.evaluate": {
"total": 137.1567203709999,
"count": 72000,
"self": 137.1567203709999
}
}
},
"workers": {
"total": 0.9314156410525527,
"count": 73532,
"self": 0.0,
"children": {
"worker_root": {
"total": 2475.50848561295,
"count": 73532,
"is_parallel": true,
"self": 1049.6477583019785,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002739246000004414,
"count": 1,
"is_parallel": true,
"self": 0.0006799999999884676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020592460000159463,
"count": 8,
"is_parallel": true,
"self": 0.0020592460000159463
}
}
},
"UnityEnvironment.step": {
"total": 0.05236203600000522,
"count": 1,
"is_parallel": true,
"self": 0.0006344260000332724,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005367549999846233,
"count": 1,
"is_parallel": true,
"self": 0.0005367549999846233
},
"communicator.exchange": {
"total": 0.049208727000007,
"count": 1,
"is_parallel": true,
"self": 0.049208727000007
},
"steps_from_proto": {
"total": 0.001982127999980321,
"count": 1,
"is_parallel": true,
"self": 0.0004467350000254555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015353929999548654,
"count": 8,
"is_parallel": true,
"self": 0.0015353929999548654
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1425.8607273109717,
"count": 73531,
"is_parallel": true,
"self": 39.23475183090068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.240800827039948,
"count": 73531,
"is_parallel": true,
"self": 28.240800827039948
},
"communicator.exchange": {
"total": 1234.3995326889938,
"count": 73531,
"is_parallel": true,
"self": 1234.3995326889938
},
"steps_from_proto": {
"total": 123.9856419640374,
"count": 73531,
"is_parallel": true,
"self": 24.678291116113627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.30735084792377,
"count": 588248,
"is_parallel": true,
"self": 99.30735084792377
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 767.6285389849686,
"count": 73532,
"self": 2.961079264991099,
"children": {
"process_trajectory": {
"total": 139.28040530797932,
"count": 73532,
"self": 139.0168318729792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26357343500012576,
"count": 2,
"self": 0.26357343500012576
}
}
},
"_update_policy": {
"total": 625.3870544119982,
"count": 522,
"self": 397.92356415898837,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.4634902530098,
"count": 26235,
"self": 227.4634902530098
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12228099300000395,
"count": 1,
"self": 0.0018525279997447797,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12042846500025917,
"count": 1,
"self": 0.12042846500025917
}
}
}
}
}
}
}
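
Below is a minimal sketch (not part of the original log) of one way to inspect a timers.json like this: it prints each gauge's final value with its min/max over the run, then walks the hierarchical timer tree, reporting each timer's total time, share of the root wall-clock total, and call count. The file path in the open() call and the function names are illustrative assumptions, not part of the ML-Agents API.

import json

def print_gauges(gauges):
    """Print each gauge's latest value alongside its min/max over the run."""
    for name, g in sorted(gauges.items()):
        print(f"{name}: value={g['value']:.4f} "
              f"(min={g['min']:.4f}, max={g['max']:.4f}, count={g['count']})")

def print_timers(name, node, root_total, depth=0):
    """Recursively print each timer's total time, share of root, and call count."""
    total = node.get("total", 0.0)
    share = 100.0 * total / root_total if root_total else 0.0
    flag = " [parallel]" if node.get("is_parallel") else ""
    print(f"{'  ' * depth}{name}: {total:.1f}s ({share:.1f}%), "
          f"count={node.get('count', 0)}{flag}")
    for child_name, child in node.get("children", {}).items():
        print_timers(child_name, child, root_total, depth + 1)

if __name__ == "__main__":
    # Assumed location; adjust to wherever the file lives in your checkout.
    with open("run_logs/timers.json") as f:
        root = json.load(f)
    print_gauges(root["gauges"])
    print()
    print_timers(root.get("name", "root"), root, root.get("total", 0.0))

Read this way, the hot spot in this run is plain: communicator.exchange under the parallel UnityEnvironment.step accounts for roughly 1234 s of the ~2481 s total, so most wall-clock time goes to stepping the Unity simulation rather than to the PyTorch policy update (_update_policy, ~625 s). Timers flagged is_parallel are collected in the environment subprocess, so their totals overlap with, rather than add to, the main-thread timers.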