{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43003544211387634,
"min": 0.4259468615055084,
"max": 0.5564640164375305,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 12956.107421875,
"min": 11529.9345703125,
"max": 16115.361328125,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 375.75949367088606,
"min": 297.1521739130435,
"max": 439.6388888888889,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29685.0,
"min": 16887.0,
"max": 31654.0,
"count": 10
},
"Pyramids.Step.mean": {
"value": 1289993.0,
"min": 1019954.0,
"max": 1289993.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 1289993.0,
"min": 1019954.0,
"max": 1289993.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4642800986766815,
"min": 0.4642800986766815,
"max": 0.5753942131996155,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 125.81990814208984,
"min": 91.86604309082031,
"max": 161.11038208007812,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006006885785609484,
"min": -0.007385359611362219,
"max": 0.01620672456920147,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.6278660297393799,
"min": -2.0679006576538086,
"max": 4.489262580871582,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3901948580184045,
"min": 1.3901948580184045,
"max": 1.6593565024111583,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 108.43519892543554,
"min": 75.11099871993065,
"max": 152.66079822182655,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3901948580184045,
"min": 1.3901948580184045,
"max": 1.6593565024111583,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 108.43519892543554,
"min": 75.11099871993065,
"max": 152.66079822182655,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028342034898788625,
"min": 0.025077272427038028,
"max": 0.03446142975472968,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.210678722105513,
"min": 1.5042911969067063,
"max": 2.689527776587056,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07030187800539554,
"min": 0.06390978233650371,
"max": 0.07175959140572341,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0545281700809332,
"min": 0.6179326633767536,
"max": 1.0545281700809332,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015759824926175902,
"min": 0.011126757987464467,
"max": 0.01646578086122948,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23639737389263854,
"min": 0.10014082188718021,
"max": 0.23639737389263854,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.7836980721333314e-06,
"min": 5.7836980721333314e-06,
"max": 6.677051364062393e-05,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 8.675547108199997e-05,
"min": 8.675547108199997e-05,
"max": 0.0008558660377886155,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10192786666666669,
"min": 0.10192786666666669,
"max": 0.12225681196581197,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5289180000000002,
"min": 1.1003113076923077,
"max": 1.7019194615384616,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00020259387999999992,
"min": 0.00020259387999999992,
"max": 0.0022334555153846153,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0030389081999999987,
"min": 0.0030389081999999987,
"max": 0.028640301938461545,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007259301375597715,
"min": 0.007259301375597715,
"max": 0.009108494035899639,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10888952016830444,
"min": 0.07797230780124664,
"max": 0.1275189220905304,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699628883",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699629615"
},
"total": 731.9789127710001,
"count": 1,
"self": 0.5278101100002459,
"children": {
"run_training.setup": {
"total": 0.06974291000005906,
"count": 1,
"self": 0.06974291000005906
},
"TrainerController.start_learning": {
"total": 731.3813597509998,
"count": 1,
"self": 0.4145712000290587,
"children": {
"TrainerController._reset_env": {
"total": 4.413059200000134,
"count": 1,
"self": 4.413059200000134
},
"TrainerController.advance": {
"total": 726.4602336069702,
"count": 19452,
"self": 0.4373781089680051,
"children": {
"env_step": {
"total": 537.202781170985,
"count": 19452,
"self": 497.5889771820239,
"children": {
"SubprocessEnvManager._take_step": {
"total": 39.35581092697703,
"count": 19452,
"self": 1.401771086952067,
"children": {
"TorchPolicy.evaluate": {
"total": 37.95403984002496,
"count": 18811,
"self": 37.95403984002496
}
}
},
"workers": {
"total": 0.2579930619840525,
"count": 19452,
"self": 0.0,
"children": {
"worker_root": {
"total": 729.7642366219825,
"count": 19452,
"is_parallel": true,
"self": 266.89111232700634,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023889730000519194,
"count": 1,
"is_parallel": true,
"self": 0.0006929640003363602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016960089997155592,
"count": 8,
"is_parallel": true,
"self": 0.0016960089997155592
}
}
},
"UnityEnvironment.step": {
"total": 0.055773190999389044,
"count": 1,
"is_parallel": true,
"self": 0.0006318409996310947,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048096299997268943,
"count": 1,
"is_parallel": true,
"self": 0.00048096299997268943
},
"communicator.exchange": {
"total": 0.052878647000397905,
"count": 1,
"is_parallel": true,
"self": 0.052878647000397905
},
"steps_from_proto": {
"total": 0.001781739999387355,
"count": 1,
"is_parallel": true,
"self": 0.0004123619992242311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001369378000163124,
"count": 8,
"is_parallel": true,
"self": 0.001369378000163124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 462.87312429497615,
"count": 19451,
"is_parallel": true,
"self": 10.512716950151116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.266745670896853,
"count": 19451,
"is_parallel": true,
"self": 7.266745670896853
},
"communicator.exchange": {
"total": 415.094693290951,
"count": 19451,
"is_parallel": true,
"self": 415.094693290951
},
"steps_from_proto": {
"total": 29.998968382977182,
"count": 19451,
"is_parallel": true,
"self": 6.030306335979731,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.96866204699745,
"count": 155608,
"is_parallel": true,
"self": 23.96866204699745
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 188.8200743270172,
"count": 19452,
"self": 0.851028124034201,
"children": {
"process_trajectory": {
"total": 38.17395239498455,
"count": 19452,
"self": 38.17395239498455
},
"_update_policy": {
"total": 149.79509380799846,
"count": 141,
"self": 89.6413550840125,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.15373872398595,
"count": 6774,
"self": 60.15373872398595
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.22000253922306e-07,
"count": 1,
"self": 9.22000253922306e-07
},
"TrainerController._save_models": {
"total": 0.09349482200013881,
"count": 1,
"self": 0.0016539690004719887,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09184085299966682,
"count": 1,
"self": 0.09184085299966682
}
}
}
}
}
}
}