{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3852793276309967,
"min": 0.3797835111618042,
"max": 1.433154821395874,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11459.748046875,
"min": 11350.9697265625,
"max": 43476.18359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5535212159156799,
"min": -0.07397017627954483,
"max": 0.6457347869873047,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.87889099121094,
"min": -17.826812744140625,
"max": 183.388671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00522491754963994,
"min": -0.01958439126610756,
"max": 0.33272603154182434,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.4525270462036133,
"min": -5.561967372894287,
"max": 78.85607147216797,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0677243735749178,
"min": 0.06499431559461213,
"max": 0.07326195616755571,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9481412300488492,
"min": 0.4861926172445823,
"max": 1.0655637450011757,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01504329628147154,
"min": 0.001109389307418349,
"max": 0.015637110443601544,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21060614794060156,
"min": 0.014422060996438536,
"max": 0.22328873979859054,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.714811714142857e-06,
"min": 7.714811714142857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000108007363998,
"min": 0.000108007363998,
"max": 0.0036338671887109993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257157142857144,
"min": 0.10257157142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360020000000002,
"min": 1.3886848,
"max": 2.6112889999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002668999857142857,
"min": 0.0002668999857142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037365998,
"min": 0.0037365998,
"max": 0.1211477711,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010459644719958305,
"min": 0.010377160273492336,
"max": 0.4968646466732025,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14643502235412598,
"min": 0.14528024196624756,
"max": 3.4780526161193848,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 334.51724137931035,
"min": 294.680412371134,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29103.0,
"min": 15984.0,
"max": 34058.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6194965330691173,
"min": -1.0000000521540642,
"max": 1.6640680231077154,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 140.8961983770132,
"min": -28.66520169377327,
"max": 171.82159805297852,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6194965330691173,
"min": -1.0000000521540642,
"max": 1.6640680231077154,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 140.8961983770132,
"min": -28.66520169377327,
"max": 171.82159805297852,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03631456917213362,
"min": 0.03152031869763753,
"max": 10.237699469551444,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1593675179756247,
"min": 3.0574709136708407,
"max": 163.8031915128231,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753751931",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn --force ./config/ppo/PyramidsMy.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753754090"
},
"total": 2158.6271863230004,
"count": 1,
"self": 0.49298045800014734,
"children": {
"run_training.setup": {
"total": 0.021437057000184723,
"count": 1,
"self": 0.021437057000184723
},
"TrainerController.start_learning": {
"total": 2158.112768808,
"count": 1,
"self": 1.4094962750205013,
"children": {
"TrainerController._reset_env": {
"total": 2.1681423770000947,
"count": 1,
"self": 2.1681423770000947
},
"TrainerController.advance": {
"total": 2154.4517652979794,
"count": 64038,
"self": 1.4200597159506287,
"children": {
"env_step": {
"total": 1505.1154365839925,
"count": 64038,
"self": 1356.8530798689758,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.4517320460459,
"count": 64038,
"self": 4.529169640144119,
"children": {
"TorchPolicy.evaluate": {
"total": 142.92256240590177,
"count": 62565,
"self": 142.92256240590177
}
}
},
"workers": {
"total": 0.8106246689708314,
"count": 64038,
"self": 0.0,
"children": {
"worker_root": {
"total": 2153.098742075161,
"count": 64038,
"is_parallel": true,
"self": 908.6235415941946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001828888999625633,
"count": 1,
"is_parallel": true,
"self": 0.0006234419993234042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012054470003022288,
"count": 8,
"is_parallel": true,
"self": 0.0012054470003022288
}
}
},
"UnityEnvironment.step": {
"total": 0.051520741999866004,
"count": 1,
"is_parallel": true,
"self": 0.0005621669993161049,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004865520004386781,
"count": 1,
"is_parallel": true,
"self": 0.0004865520004386781
},
"communicator.exchange": {
"total": 0.048744470000201545,
"count": 1,
"is_parallel": true,
"self": 0.048744470000201545
},
"steps_from_proto": {
"total": 0.0017275529999096761,
"count": 1,
"is_parallel": true,
"self": 0.00037091600006533554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013566369998443406,
"count": 8,
"is_parallel": true,
"self": 0.0013566369998443406
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1244.4752004809666,
"count": 64037,
"is_parallel": true,
"self": 31.466622586001904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.576736523947602,
"count": 64037,
"is_parallel": true,
"self": 22.576736523947602
},
"communicator.exchange": {
"total": 1096.2498769729937,
"count": 64037,
"is_parallel": true,
"self": 1096.2498769729937
},
"steps_from_proto": {
"total": 94.18196439802341,
"count": 64037,
"is_parallel": true,
"self": 18.901278432871095,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.28068596515232,
"count": 512296,
"is_parallel": true,
"self": 75.28068596515232
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.9162689980362,
"count": 64038,
"self": 2.6121271349984454,
"children": {
"process_trajectory": {
"total": 126.06765203404393,
"count": 64038,
"self": 125.75018967004462,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3174623639993115,
"count": 2,
"self": 0.3174623639993115
}
}
},
"_update_policy": {
"total": 519.2364898289939,
"count": 455,
"self": 288.79343368699983,
"children": {
"TorchPPOOptimizer.update": {
"total": 230.44305614199402,
"count": 22770,
"self": 230.44305614199402
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.180002962239087e-07,
"count": 1,
"self": 9.180002962239087e-07
},
"TrainerController._save_models": {
"total": 0.08336393999979919,
"count": 1,
"self": 0.00183412399928784,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08152981600051135,
"count": 1,
"self": 0.08152981600051135
}
}
}
}
}
}
}