{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3815002143383026,
"min": 0.355610191822052,
"max": 1.4727157354354858,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11493.8388671875,
"min": 10651.236328125,
"max": 44676.3046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5834378004074097,
"min": -0.1707555055618286,
"max": 0.6622437238693237,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 163.94601440429688,
"min": -40.46905517578125,
"max": 192.71292114257812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06509080529212952,
"min": -0.2689429223537445,
"max": 0.45066505670547485,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.290515899658203,
"min": -76.37979125976562,
"max": 106.8076171875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07032672611586306,
"min": 0.06621863313124604,
"max": 0.0733547788418253,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9845741656220829,
"min": 0.47239429945354994,
"max": 1.070762459896893,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.027843490233256794,
"min": 0.0014670771499081241,
"max": 0.027843490233256794,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3898088632655951,
"min": 0.010625317061311207,
"max": 0.3898088632655951,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.395611820542856e-06,
"min": 7.395611820542856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010353856548759998,
"min": 0.00010353856548759998,
"max": 0.0032584958138348,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246517142857146,
"min": 0.10246517142857146,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345124000000005,
"min": 1.3691136000000002,
"max": 2.4858198,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025627062571428573,
"min": 0.00025627062571428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00358778876,
"min": 0.00358778876,
"max": 0.10863790348,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008785853162407875,
"min": 0.008579774759709835,
"max": 0.42506569623947144,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12300194054841995,
"min": 0.12300194054841995,
"max": 2.9754598140716553,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.29347826086956,
"min": 282.05555555555554,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30295.0,
"min": 15984.0,
"max": 33981.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5837260670150104,
"min": -1.0000000521540642,
"max": 1.6994166529426973,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.70279816538095,
"min": -32.000001668930054,
"max": 183.5369985178113,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5837260670150104,
"min": -1.0000000521540642,
"max": 1.6994166529426973,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.70279816538095,
"min": -32.000001668930054,
"max": 183.5369985178113,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02960676156647463,
"min": 0.026308634459520832,
"max": 8.492122150957584,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.723822064115666,
"min": 2.6045548114925623,
"max": 135.87395441532135,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696710169",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696712563"
},
"total": 2393.969678263,
"count": 1,
"self": 0.47550651299934543,
"children": {
"run_training.setup": {
"total": 0.042267687000276055,
"count": 1,
"self": 0.042267687000276055
},
"TrainerController.start_learning": {
"total": 2393.4519040630003,
"count": 1,
"self": 1.5517238758338863,
"children": {
"TrainerController._reset_env": {
"total": 3.0201437169998826,
"count": 1,
"self": 3.0201437169998826
},
"TrainerController.advance": {
"total": 2388.798853809166,
"count": 63908,
"self": 1.567956991027586,
"children": {
"env_step": {
"total": 1648.7635433490827,
"count": 63908,
"self": 1513.8643645580232,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.98884237199718,
"count": 63908,
"self": 5.042947454931891,
"children": {
"TorchPolicy.evaluate": {
"total": 128.9458949170653,
"count": 62564,
"self": 128.9458949170653
}
}
},
"workers": {
"total": 0.9103364190623324,
"count": 63908,
"self": 0.0,
"children": {
"worker_root": {
"total": 2388.293373693879,
"count": 63908,
"is_parallel": true,
"self": 1002.9505098629011,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002011757999753172,
"count": 1,
"is_parallel": true,
"self": 0.0006104760000198439,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014012819997333281,
"count": 8,
"is_parallel": true,
"self": 0.0014012819997333281
}
}
},
"UnityEnvironment.step": {
"total": 0.053637976000118215,
"count": 1,
"is_parallel": true,
"self": 0.0005762670007243287,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005297349998727441,
"count": 1,
"is_parallel": true,
"self": 0.0005297349998727441
},
"communicator.exchange": {
"total": 0.04987142199979644,
"count": 1,
"is_parallel": true,
"self": 0.04987142199979644
},
"steps_from_proto": {
"total": 0.002660551999724703,
"count": 1,
"is_parallel": true,
"self": 0.0004953649995513842,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021651870001733187,
"count": 8,
"is_parallel": true,
"self": 0.0021651870001733187
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1385.3428638309779,
"count": 63907,
"is_parallel": true,
"self": 35.29512878188598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.77566102695164,
"count": 63907,
"is_parallel": true,
"self": 25.77566102695164
},
"communicator.exchange": {
"total": 1208.8517999500955,
"count": 63907,
"is_parallel": true,
"self": 1208.8517999500955
},
"steps_from_proto": {
"total": 115.42027407204478,
"count": 63907,
"is_parallel": true,
"self": 23.085294917598276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 92.3349791544465,
"count": 511256,
"is_parallel": true,
"self": 92.3349791544465
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 738.4673534690555,
"count": 63908,
"self": 2.7380072531336737,
"children": {
"process_trajectory": {
"total": 132.91661907591742,
"count": 63908,
"self": 132.7303127519176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1863063239998155,
"count": 2,
"self": 0.1863063239998155
}
}
},
"_update_policy": {
"total": 602.8127271400044,
"count": 445,
"self": 390.61249259602846,
"children": {
"TorchPPOOptimizer.update": {
"total": 212.20023454397597,
"count": 22857,
"self": 212.20023454397597
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.920004231389612e-07,
"count": 1,
"self": 9.920004231389612e-07
},
"TrainerController._save_models": {
"total": 0.08118166900021606,
"count": 1,
"self": 0.0014543070001309388,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07972736200008512,
"count": 1,
"self": 0.07972736200008512
}
}
}
}
}
}
}