{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48803627490997314,
"min": 0.4691722095012665,
"max": 1.326302170753479,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14656.705078125,
"min": 14172.75390625,
"max": 40234.703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989955.0,
"min": 29876.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989955.0,
"min": 29876.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4348636865615845,
"min": -0.09703798592090607,
"max": 0.47636258602142334,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 118.71778869628906,
"min": -23.386154174804688,
"max": 126.71244812011719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -1.442028284072876,
"min": -1.442028284072876,
"max": 0.4942820072174072,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -393.6737060546875,
"min": -393.6737060546875,
"max": 117.14483642578125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06956362520023027,
"min": 0.0644105707504917,
"max": 0.07259371186792786,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9738907528032238,
"min": 0.5020742187254403,
"max": 1.0214940445924487,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.4001004337859783,
"min": 0.0004993289633673127,
"max": 0.4001004337859783,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 5.601406073003696,
"min": 0.005991947560407752,
"max": 5.601406073003696,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.460240370428571e-06,
"min": 7.460240370428571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010444336518599999,
"min": 0.00010444336518599999,
"max": 0.0032596697134434994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248671428571428,
"min": 0.10248671428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348139999999998,
"min": 1.3886848,
"max": 2.4426158,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002584227571428571,
"min": 0.0002584227571428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036179185999999997,
"min": 0.0036179185999999997,
"max": 0.10867699435,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009831900708377361,
"min": 0.009785848669707775,
"max": 0.483313649892807,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1376466155052185,
"min": 0.1370018869638443,
"max": 3.383195638656616,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 434.2368421052632,
"min": 383.02777777777777,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 33002.0,
"min": 16803.0,
"max": 33585.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3814815551434692,
"min": -0.9999375520274043,
"max": 1.5551588819043276,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.99259819090366,
"min": -31.998001664876938,
"max": 113.52659837901592,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3814815551434692,
"min": -0.9999375520274043,
"max": 1.5551588819043276,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.99259819090366,
"min": -31.998001664876938,
"max": 113.52659837901592,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04368210466991753,
"min": 0.03957188677414295,
"max": 9.365610009607147,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.319839954913732,
"min": 2.8887477345124353,
"max": 159.2153701633215,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682755751",
"python_version": "3.8.16 (default, Mar 2 2023, 03:21:46) \n[GCC 11.2.0]",
"command_line_arguments": "/root/miniconda3/envs/mlagent_env/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu111",
"numpy_version": "1.21.2",
"end_time_seconds": "1682757175"
},
"total": 1424.1670268690214,
"count": 1,
"self": 0.373104577884078,
"children": {
"run_training.setup": {
"total": 0.016415647231042385,
"count": 1,
"self": 0.016415647231042385
},
"TrainerController.start_learning": {
"total": 1423.7775066439062,
"count": 1,
"self": 1.4699603440240026,
"children": {
"TrainerController._reset_env": {
"total": 4.1304948180913925,
"count": 1,
"self": 4.1304948180913925
},
"TrainerController.advance": {
"total": 1418.1123624667525,
"count": 63550,
"self": 1.4319884749129415,
"children": {
"env_step": {
"total": 985.8177361739799,
"count": 63550,
"self": 886.8938535535708,
"children": {
"SubprocessEnvManager._take_step": {
"total": 98.04699791874737,
"count": 63550,
"self": 3.8840013816952705,
"children": {
"TorchPolicy.evaluate": {
"total": 94.1629965370521,
"count": 62567,
"self": 94.1629965370521
}
}
},
"workers": {
"total": 0.8768847016617656,
"count": 63550,
"self": 0.0,
"children": {
"worker_root": {
"total": 1421.1386285750195,
"count": 63550,
"is_parallel": true,
"self": 616.0351407621056,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001508898101747036,
"count": 1,
"is_parallel": true,
"self": 0.00047452282160520554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010343752801418304,
"count": 8,
"is_parallel": true,
"self": 0.0010343752801418304
}
}
},
"UnityEnvironment.step": {
"total": 0.03700953349471092,
"count": 1,
"is_parallel": true,
"self": 0.0003574267029762268,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002939673140645027,
"count": 1,
"is_parallel": true,
"self": 0.0002939673140645027
},
"communicator.exchange": {
"total": 0.03551559429615736,
"count": 1,
"is_parallel": true,
"self": 0.03551559429615736
},
"steps_from_proto": {
"total": 0.0008425451815128326,
"count": 1,
"is_parallel": true,
"self": 0.0002325214445590973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006100237369537354,
"count": 8,
"is_parallel": true,
"self": 0.0006100237369537354
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 805.103487812914,
"count": 63549,
"is_parallel": true,
"self": 16.721625689417124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.140724504366517,
"count": 63549,
"is_parallel": true,
"self": 11.140724504366517
},
"communicator.exchange": {
"total": 730.9195039076731,
"count": 63549,
"is_parallel": true,
"self": 730.9195039076731
},
"steps_from_proto": {
"total": 46.32163371145725,
"count": 63549,
"is_parallel": true,
"self": 11.537732562981546,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.78390114847571,
"count": 508392,
"is_parallel": true,
"self": 34.78390114847571
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 430.86263781785965,
"count": 63550,
"self": 2.588444850407541,
"children": {
"process_trajectory": {
"total": 76.32361845392734,
"count": 63550,
"self": 76.18030210863799,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14331634528934956,
"count": 2,
"self": 0.14331634528934956
}
}
},
"_update_policy": {
"total": 351.95057451352477,
"count": 449,
"self": 213.26261940971017,
"children": {
"TorchPPOOptimizer.update": {
"total": 138.6879551038146,
"count": 22791,
"self": 138.6879551038146
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.039596438407898e-06,
"count": 1,
"self": 2.039596438407898e-06
},
"TrainerController._save_models": {
"total": 0.06468697544187307,
"count": 1,
"self": 0.0014809630811214447,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06320601236075163,
"count": 1,
"self": 0.06320601236075163
}
}
}
}
}
}
}