ppo-PyramidsRND / run_logs / timers.json
First training of PyramidsRND
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34699752926826477,
"min": 0.3159092664718628,
"max": 1.4603265523910522,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10437.685546875,
"min": 9487.38671875,
"max": 44300.46484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989920.0,
"min": 29952.0,
"max": 989920.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6751811504364014,
"min": -0.16094757616519928,
"max": 0.7103095054626465,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 194.45217895507812,
"min": -38.14457702636719,
"max": 203.85882568359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01389892864972353,
"min": -0.05723068490624428,
"max": 0.35110634565353394,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.002891540527344,
"min": -15.566746711730957,
"max": 83.21220397949219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06436349743758217,
"min": 0.06436349743758217,
"max": 0.07470696466276071,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9010889641261504,
"min": 0.4829072002759711,
"max": 1.0670914681566714,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015531243360739955,
"min": 0.0005828524933987601,
"max": 0.017371779915438743,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21743740705035938,
"min": 0.008159934907582642,
"max": 0.2432049188161424,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.50541178394286e-06,
"min": 7.50541178394286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010507576497520004,
"min": 0.00010507576497520004,
"max": 0.0036330145889952002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250177142857143,
"min": 0.10250177142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350248,
"min": 1.3886848,
"max": 2.6110048000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025992696571428583,
"min": 0.00025992696571428583,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036389775200000017,
"min": 0.0036389775200000017,
"max": 0.12111937951999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010142437182366848,
"min": 0.00944580975919962,
"max": 0.3751828670501709,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14199411869049072,
"min": 0.13224133849143982,
"max": 2.6262800693511963,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 292.50877192982455,
"min": 259.3142857142857,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 33346.0,
"min": 15984.0,
"max": 35004.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6007150296220738,
"min": -1.0000000521540642,
"max": 1.721632364108449,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 180.88079834729433,
"min": -29.67080158740282,
"max": 186.78779824078083,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6007150296220738,
"min": -1.0000000521540642,
"max": 1.721632364108449,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 180.88079834729433,
"min": -29.67080158740282,
"max": 186.78779824078083,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03028535742474415,
"min": 0.027669988410447592,
"max": 7.14602573402226,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.422245388996089,
"min": 2.852475485706236,
"max": 114.33641174435616,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678038458",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678040735"
},
"total": 2277.750227079,
"count": 1,
"self": 0.4750130949996674,
"children": {
"run_training.setup": {
"total": 0.10181813100007275,
"count": 1,
"self": 0.10181813100007275
},
"TrainerController.start_learning": {
"total": 2277.1733958530003,
"count": 1,
"self": 1.3208378399958747,
"children": {
"TrainerController._reset_env": {
"total": 6.013977581000063,
"count": 1,
"self": 6.013977581000063
},
"TrainerController.advance": {
"total": 2269.7565665790044,
"count": 64143,
"self": 1.3347807180507516,
"children": {
"env_step": {
"total": 1540.8361078829737,
"count": 64143,
"self": 1433.2961203770005,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.75519567599622,
"count": 64143,
"self": 4.52146611104331,
"children": {
"TorchPolicy.evaluate": {
"total": 102.23372956495291,
"count": 62555,
"self": 34.86866732299063,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.36506224196228,
"count": 62555,
"self": 67.36506224196228
}
}
}
}
},
"workers": {
"total": 0.784791829976939,
"count": 64143,
"self": 0.0,
"children": {
"worker_root": {
"total": 2272.620157166985,
"count": 64143,
"is_parallel": true,
"self": 951.8491250070113,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001968218000001798,
"count": 1,
"is_parallel": true,
"self": 0.0006550559996867378,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013131620003150601,
"count": 8,
"is_parallel": true,
"self": 0.0013131620003150601
}
}
},
"UnityEnvironment.step": {
"total": 0.048099222000018926,
"count": 1,
"is_parallel": true,
"self": 0.0005224360002102912,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004710270000032324,
"count": 1,
"is_parallel": true,
"self": 0.0004710270000032324
},
"communicator.exchange": {
"total": 0.0455780979998508,
"count": 1,
"is_parallel": true,
"self": 0.0455780979998508
},
"steps_from_proto": {
"total": 0.0015276609999546054,
"count": 1,
"is_parallel": true,
"self": 0.0003804980001405056,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011471629998140997,
"count": 8,
"is_parallel": true,
"self": 0.0011471629998140997
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1320.7710321599736,
"count": 64142,
"is_parallel": true,
"self": 30.69176217497261,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.673637889004567,
"count": 64142,
"is_parallel": true,
"self": 22.673637889004567
},
"communicator.exchange": {
"total": 1177.2773399969985,
"count": 64142,
"is_parallel": true,
"self": 1177.2773399969985
},
"steps_from_proto": {
"total": 90.12829209899792,
"count": 64142,
"is_parallel": true,
"self": 21.076402274976317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.0518898240216,
"count": 513136,
"is_parallel": true,
"self": 69.0518898240216
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 727.5856779779799,
"count": 64143,
"self": 2.4304591569750755,
"children": {
"process_trajectory": {
"total": 161.31922924300193,
"count": 64143,
"self": 160.94294470600175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37628453700017417,
"count": 2,
"self": 0.37628453700017417
}
}
},
"_update_policy": {
"total": 563.8359895780029,
"count": 456,
"self": 220.67973269997356,
"children": {
"TorchPPOOptimizer.update": {
"total": 343.15625687802935,
"count": 22779,
"self": 343.15625687802935
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.629999683762435e-07,
"count": 1,
"self": 8.629999683762435e-07
},
"TrainerController._save_models": {
"total": 0.08201298999983919,
"count": 1,
"self": 0.0013870119996681751,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08062597800017102,
"count": 1,
"self": 0.08062597800017102
}
}
}
}
}
}
}
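
The file above is plain JSON, so it can be inspected without TensorBoard or the ML-Agents tooling. Below is a minimal sketch of reading it with the standard library; the local filename "timers.json" and the choice of which fields to print are assumptions for illustration, not part of any ML-Agents API.

import json

# Load the timers file produced by the run (path is an assumption for this example).
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Show where wall-clock time went at the top level of the timer tree.
total = timers["total"]
for child, node in timers["children"].items():
    share = 100.0 * node["total"] / total
    print(f"{child}: {node['total']:.1f}s ({share:.1f}% of {total:.1f}s)")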