{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37153637409210205,
"min": 0.37153637409210205,
"max": 1.3986560106277466,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11152.0361328125,
"min": 11152.0361328125,
"max": 42429.62890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989875.0,
"min": 29922.0,
"max": 989875.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989875.0,
"min": 29922.0,
"max": 989875.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5014972686767578,
"min": -0.11548372358083725,
"max": 0.615516185760498,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 136.40725708007812,
"min": -27.36964225769043,
"max": 171.72901916503906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.056535810232162476,
"min": -0.06926246732473373,
"max": 0.561869204044342,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -15.377739906311035,
"min": -19.324228286743164,
"max": 133.16299438476562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06634535931689675,
"min": 0.06482686955582712,
"max": 0.07165673572526228,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9288350304365546,
"min": 0.46054802762545866,
"max": 1.0424821717800417,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01546134001303775,
"min": 0.0014314126598446836,
"max": 0.018382826918968927,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2164587601825285,
"min": 0.017868821166700646,
"max": 0.2757424037845339,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.346968979614289e-06,
"min": 7.346968979614289e-06,
"max": 0.0002952342015886,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010285756571460005,
"min": 0.00010285756571460005,
"max": 0.0037578460473846995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244895714285716,
"min": 0.10244895714285716,
"max": 0.1984114,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342854000000003,
"min": 1.3888798,
"max": 2.6526153,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025465081857142867,
"min": 0.00025465081857142867,
"max": 0.00984129886,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035651114600000013,
"min": 0.0035651114600000013,
"max": 0.12527626847,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016183989122509956,
"min": 0.016183989122509956,
"max": 0.6410184502601624,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2265758365392685,
"min": 0.2265758365392685,
"max": 4.487129211425781,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 375.43589743589746,
"min": 304.9375,
"max": 987.2424242424242,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29284.0,
"min": 16721.0,
"max": 32579.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.573266649666505,
"min": -0.9275515655224974,
"max": 1.6533791540035356,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.71479867398739,
"min": -30.609201662242413,
"max": 159.68539853394032,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.573266649666505,
"min": -0.9275515655224974,
"max": 1.6533791540035356,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.71479867398739,
"min": -30.609201662242413,
"max": 159.68539853394032,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06233808774954806,
"min": 0.05300077534699691,
"max": 13.039435151745291,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.862370844464749,
"min": 4.862370844464749,
"max": 221.67039757966995,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704029988",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704033579"
},
"total": 3591.0262272519994,
"count": 1,
"self": 1.2004776819990184,
"children": {
"run_training.setup": {
"total": 0.06911664100005055,
"count": 1,
"self": 0.06911664100005055
},
"TrainerController.start_learning": {
"total": 3589.7566329290003,
"count": 1,
"self": 2.7229789910102227,
"children": {
"TrainerController._reset_env": {
"total": 4.708644239000023,
"count": 1,
"self": 4.708644239000023
},
"TrainerController.advance": {
"total": 3582.18938459199,
"count": 63922,
"self": 3.2712945478947404,
"children": {
"env_step": {
"total": 2405.860344810014,
"count": 63922,
"self": 2224.383920032934,
"children": {
"SubprocessEnvManager._take_step": {
"total": 179.68760219104047,
"count": 63922,
"self": 7.681746104975787,
"children": {
"TorchPolicy.evaluate": {
"total": 172.00585608606468,
"count": 62572,
"self": 172.00585608606468
}
}
},
"workers": {
"total": 1.7888225860392595,
"count": 63922,
"self": 0.0,
"children": {
"worker_root": {
"total": 3582.163560618989,
"count": 63922,
"is_parallel": true,
"self": 1564.2782998440384,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00931219299991426,
"count": 1,
"is_parallel": true,
"self": 0.005132059999823468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004180133000090791,
"count": 8,
"is_parallel": true,
"self": 0.004180133000090791
}
}
},
"UnityEnvironment.step": {
"total": 0.07767648000003646,
"count": 1,
"is_parallel": true,
"self": 0.0008157620000019961,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006920570000374937,
"count": 1,
"is_parallel": true,
"self": 0.0006920570000374937
},
"communicator.exchange": {
"total": 0.0741032199999836,
"count": 1,
"is_parallel": true,
"self": 0.0741032199999836
},
"steps_from_proto": {
"total": 0.0020654410000133794,
"count": 1,
"is_parallel": true,
"self": 0.0004504510001197559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016149899998936235,
"count": 8,
"is_parallel": true,
"self": 0.0016149899998936235
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2017.8852607749504,
"count": 63921,
"is_parallel": true,
"self": 55.563861940003335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.88520838599584,
"count": 63921,
"is_parallel": true,
"self": 30.88520838599584
},
"communicator.exchange": {
"total": 1790.6192530339617,
"count": 63921,
"is_parallel": true,
"self": 1790.6192530339617
},
"steps_from_proto": {
"total": 140.81693741498964,
"count": 63921,
"is_parallel": true,
"self": 31.27416615698803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 109.54277125800161,
"count": 511368,
"is_parallel": true,
"self": 109.54277125800161
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1173.0577452340813,
"count": 63922,
"self": 5.787312613146696,
"children": {
"process_trajectory": {
"total": 188.3830523789327,
"count": 63922,
"self": 187.95124712893335,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4318052499993428,
"count": 2,
"self": 0.4318052499993428
}
}
},
"_update_policy": {
"total": 978.8873802420019,
"count": 457,
"self": 409.4333825799458,
"children": {
"TorchPPOOptimizer.update": {
"total": 569.453997662056,
"count": 22803,
"self": 569.453997662056
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5499999790336005e-06,
"count": 1,
"self": 1.5499999790336005e-06
},
"TrainerController._save_models": {
"total": 0.1356235570001445,
"count": 1,
"self": 0.003266636000262224,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13235692099988228,
"count": 1,
"self": 0.13235692099988228
}
}
}
}
}
}
}