{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5278012156486511,
"min": 0.5278012156486511,
"max": 1.4862099885940552,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15631.361328125,
"min": 15631.361328125,
"max": 45085.66796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40233731269836426,
"min": -0.10295893251895905,
"max": 0.5011759996414185,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 105.01004028320312,
"min": -24.916061401367188,
"max": 137.3222198486328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0006438057753257453,
"min": 1.9363800674909726e-05,
"max": 0.3857065737247467,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.16803330183029175,
"min": 0.00501522421836853,
"max": 91.41246032714844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06928736775825244,
"min": 0.06596457933581007,
"max": 0.07585073002592671,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9700231486155342,
"min": 0.485842858544041,
"max": 1.0636743503273465,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012619369759964427,
"min": 0.0011091502173347049,
"max": 0.016686853437009628,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17667117663950196,
"min": 0.010158100710015432,
"max": 0.23361594811813477,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3090189922642845e-06,
"min": 7.3090189922642845e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010232626589169998,
"min": 0.00010232626589169998,
"max": 0.0035075687308105,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243630714285713,
"min": 0.10243630714285713,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341083,
"min": 1.3691136000000002,
"max": 2.5691895000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002533870835714285,
"min": 0.0002533870835714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035474191699999993,
"min": 0.0035474191699999993,
"max": 0.11694203105000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009237015619874,
"min": 0.009237015619874,
"max": 0.4253957271575928,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1293182224035263,
"min": 0.1293182224035263,
"max": 2.9777700901031494,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 437.1940298507463,
"min": 358.64444444444445,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29292.0,
"min": 15984.0,
"max": 32946.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3836716242245775,
"min": -1.0000000521540642,
"max": 1.5314777610409591,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.70599882304668,
"min": -32.000001668930054,
"max": 135.95879770815372,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3836716242245775,
"min": -1.0000000521540642,
"max": 1.5314777610409591,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.70599882304668,
"min": -32.000001668930054,
"max": 135.95879770815372,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04170397825325394,
"min": 0.03598062122427712,
"max": 8.677132241427898,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7941665429680143,
"min": 2.678321275860071,
"max": 138.83411586284637,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677800466",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677802609"
},
"total": 2143.244381458,
"count": 1,
"self": 0.49024725000072067,
"children": {
"run_training.setup": {
"total": 0.10243032499988658,
"count": 1,
"self": 0.10243032499988658
},
"TrainerController.start_learning": {
"total": 2142.6517038829998,
"count": 1,
"self": 1.2585819099558648,
"children": {
"TrainerController._reset_env": {
"total": 6.073322400000052,
"count": 1,
"self": 6.073322400000052
},
"TrainerController.advance": {
"total": 2135.2355851100438,
"count": 63615,
"self": 1.341933055949994,
"children": {
"env_step": {
"total": 1406.1346729750542,
"count": 63615,
"self": 1298.1099139971786,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.26040079094219,
"count": 63615,
"self": 4.487225540890222,
"children": {
"TorchPolicy.evaluate": {
"total": 102.77317525005196,
"count": 62562,
"self": 34.81614120701397,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.95703404303799,
"count": 62562,
"self": 67.95703404303799
}
}
}
}
},
"workers": {
"total": 0.7643581869333502,
"count": 63615,
"self": 0.0,
"children": {
"worker_root": {
"total": 2138.2669777769884,
"count": 63615,
"is_parallel": true,
"self": 950.1387406210004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001937370000177907,
"count": 1,
"is_parallel": true,
"self": 0.0006914179998602776,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012459520003176294,
"count": 8,
"is_parallel": true,
"self": 0.0012459520003176294
}
}
},
"UnityEnvironment.step": {
"total": 0.04483467800014296,
"count": 1,
"is_parallel": true,
"self": 0.0005025969996950153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004755470001782669,
"count": 1,
"is_parallel": true,
"self": 0.0004755470001782669
},
"communicator.exchange": {
"total": 0.0422917650000727,
"count": 1,
"is_parallel": true,
"self": 0.0422917650000727
},
"steps_from_proto": {
"total": 0.001564769000196975,
"count": 1,
"is_parallel": true,
"self": 0.0003925480004909332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011722209997060418,
"count": 8,
"is_parallel": true,
"self": 0.0011722209997060418
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1188.128237155988,
"count": 63614,
"is_parallel": true,
"self": 30.32241129400677,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.385593577994996,
"count": 63614,
"is_parallel": true,
"self": 22.385593577994996
},
"communicator.exchange": {
"total": 1045.9175481179989,
"count": 63614,
"is_parallel": true,
"self": 1045.9175481179989
},
"steps_from_proto": {
"total": 89.50268416598738,
"count": 63614,
"is_parallel": true,
"self": 20.918833033930696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.58385113205668,
"count": 508912,
"is_parallel": true,
"self": 68.58385113205668
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 727.7589790790394,
"count": 63615,
"self": 2.3706612329849577,
"children": {
"process_trajectory": {
"total": 156.46483486605985,
"count": 63615,
"self": 156.28303619406006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18179867199978617,
"count": 2,
"self": 0.18179867199978617
}
}
},
"_update_policy": {
"total": 568.9234829799946,
"count": 444,
"self": 221.10128775603766,
"children": {
"TorchPPOOptimizer.update": {
"total": 347.8221952239569,
"count": 22851,
"self": 347.8221952239569
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.410000529896934e-07,
"count": 1,
"self": 9.410000529896934e-07
},
"TrainerController._save_models": {
"total": 0.0842135220000273,
"count": 1,
"self": 0.0014172020000842167,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08279631999994308,
"count": 1,
"self": 0.08279631999994308
}
}
}
}
}
}
}