{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39427027106285095,
"min": 0.39427027106285095,
"max": 1.503057599067688,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11727.1748046875,
"min": 11727.1748046875,
"max": 45596.75390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4405461549758911,
"min": -0.16041484475135803,
"max": 0.47715598344802856,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 120.26910400390625,
"min": -38.01831817626953,
"max": 130.26358032226562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06472254544496536,
"min": 0.005800291895866394,
"max": 0.3595026135444641,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 17.669254302978516,
"min": 1.5776793956756592,
"max": 85.20211791992188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06988046964294933,
"min": 0.06550643272866082,
"max": 0.07200131401237467,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.04820704464424,
"min": 0.5040091980866227,
"max": 1.0646780332431707,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015888691466515486,
"min": 0.00015085834189976672,
"max": 0.015888691466515486,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2383303719977323,
"min": 0.002112016786596734,
"max": 0.2383303719977323,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.421617526160003e-06,
"min": 7.421617526160003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011132426289240005,
"min": 0.00011132426289240005,
"max": 0.0035089430303524,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247384000000002,
"min": 0.10247384000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371076000000004,
"min": 1.3886848,
"max": 2.5696476000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002571366160000002,
"min": 0.0002571366160000002,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038570492400000025,
"min": 0.0038570492400000025,
"max": 0.11698779524000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011800948530435562,
"min": 0.011800948530435562,
"max": 0.5517892241477966,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17701423168182373,
"min": 0.17513614892959595,
"max": 3.8625245094299316,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 391.68,
"min": 377.2368421052632,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29376.0,
"min": 15984.0,
"max": 32413.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4749413097898165,
"min": -1.0000000521540642,
"max": 1.4948524727486074,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 110.62059823423624,
"min": -31.991201624274254,
"max": 119.58819781988859,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4749413097898165,
"min": -1.0000000521540642,
"max": 1.4948524727486074,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 110.62059823423624,
"min": -31.991201624274254,
"max": 119.58819781988859,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04701650693003709,
"min": 0.04701650693003709,
"max": 12.144991533830762,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.526238019752782,
"min": 3.526238019752782,
"max": 194.3198645412922,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706764846",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706766850"
},
"total": 2003.6855430330002,
"count": 1,
"self": 0.4784587050007758,
"children": {
"run_training.setup": {
"total": 0.045315219999793044,
"count": 1,
"self": 0.045315219999793044
},
"TrainerController.start_learning": {
"total": 2003.1617691079996,
"count": 1,
"self": 1.2035995239743897,
"children": {
"TrainerController._reset_env": {
"total": 2.304946627999925,
"count": 1,
"self": 2.304946627999925
},
"TrainerController.advance": {
"total": 1999.5667290440251,
"count": 63610,
"self": 1.289900200961256,
"children": {
"env_step": {
"total": 1388.6125022690344,
"count": 63610,
"self": 1266.7464263020975,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.1312562029866,
"count": 63610,
"self": 4.464040634106368,
"children": {
"TorchPolicy.evaluate": {
"total": 116.66721556888024,
"count": 62556,
"self": 116.66721556888024
}
}
},
"workers": {
"total": 0.7348197639503269,
"count": 63610,
"self": 0.0,
"children": {
"worker_root": {
"total": 1998.3162297030844,
"count": 63610,
"is_parallel": true,
"self": 839.5699807410365,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020450080000955495,
"count": 1,
"is_parallel": true,
"self": 0.000602087000515894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014429209995796555,
"count": 8,
"is_parallel": true,
"self": 0.0014429209995796555
}
}
},
"UnityEnvironment.step": {
"total": 0.04878286700022727,
"count": 1,
"is_parallel": true,
"self": 0.0006065019997549825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005665480002789991,
"count": 1,
"is_parallel": true,
"self": 0.0005665480002789991
},
"communicator.exchange": {
"total": 0.045886881000114954,
"count": 1,
"is_parallel": true,
"self": 0.045886881000114954
},
"steps_from_proto": {
"total": 0.0017229360000783345,
"count": 1,
"is_parallel": true,
"self": 0.00044658600018010475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012763499998982297,
"count": 8,
"is_parallel": true,
"self": 0.0012763499998982297
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.746248962048,
"count": 63609,
"is_parallel": true,
"self": 33.89405494105995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.267558004025886,
"count": 63609,
"is_parallel": true,
"self": 23.267558004025886
},
"communicator.exchange": {
"total": 1006.7537877148643,
"count": 63609,
"is_parallel": true,
"self": 1006.7537877148643
},
"steps_from_proto": {
"total": 94.83084830209782,
"count": 63609,
"is_parallel": true,
"self": 18.473791196097864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.35705710599996,
"count": 508872,
"is_parallel": true,
"self": 76.35705710599996
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 609.6643265740295,
"count": 63610,
"self": 2.3415845629647265,
"children": {
"process_trajectory": {
"total": 118.32200545906699,
"count": 63610,
"self": 118.1322458300674,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18975962899958176,
"count": 2,
"self": 0.18975962899958176
}
}
},
"_update_policy": {
"total": 489.00073655199776,
"count": 453,
"self": 287.4265515640459,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.57418498795187,
"count": 22785,
"self": 201.57418498795187
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1140000424347818e-06,
"count": 1,
"self": 1.1140000424347818e-06
},
"TrainerController._save_models": {
"total": 0.08649279800010845,
"count": 1,
"self": 0.0015377529998659156,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08495504500024254,
"count": 1,
"self": 0.08495504500024254
}
}
}
}
}
}
}