ppo-PyramidsRND / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41527074575424194,
"min": 0.41527074575424194,
"max": 1.4686696529388428,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12424.900390625,
"min": 12424.900390625,
"max": 44553.5625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5986225605010986,
"min": -0.14470091462135315,
"max": 0.6275726556777954,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.607421875,
"min": -34.29411697387695,
"max": 175.7203369140625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011461108922958374,
"min": -0.011850395239889622,
"max": 0.3242793381214142,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.266416072845459,
"min": -3.318110704421997,
"max": 78.15132141113281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848049495343325,
"min": 0.0662977954152988,
"max": 0.0750831764767418,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0272074243014988,
"min": 0.5007734487258734,
"max": 1.028375415422488,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014373475379478702,
"min": 0.0001282080601979811,
"max": 0.016134823811409574,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21560213069218054,
"min": 0.0014102886621777922,
"max": 0.24195037320411455,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 8.729540839206669e-06,
"min": 8.729540839206669e-06,
"max": 0.00034434240161645714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00013094311258810004,
"min": 0.00013094311258810004,
"max": 0.0036645870529751997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249412666666667,
"min": 0.10249412666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374119000000002,
"min": 1.3886848,
"max": 2.4014147,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002591632540000001,
"min": 0.0002591632540000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003887448810000001,
"min": 0.003887448810000001,
"max": 0.10472777752000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010796514339745045,
"min": 0.010636893101036549,
"max": 0.43772488832473755,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16194771230220795,
"min": 0.14891649782657623,
"max": 3.0640742778778076,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.65909090909093,
"min": 320.7613636363636,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28306.0,
"min": 15984.0,
"max": 34191.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6328795288096776,
"min": -1.0000000521540642,
"max": 1.6577124811398487,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.69339853525162,
"min": -31.998401656746864,
"max": 159.14039818942547,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6328795288096776,
"min": -1.0000000521540642,
"max": 1.6577124811398487,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.69339853525162,
"min": -31.998401656746864,
"max": 159.14039818942547,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03617759909932581,
"min": 0.03537809271711012,
"max": 9.330483506433666,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1836287207406713,
"min": 3.1836287207406713,
"max": 149.28773610293865,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683038935",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683041496"
},
"total": 2560.309452869,
"count": 1,
"self": 0.5382950169996548,
"children": {
"run_training.setup": {
"total": 0.04720359299994925,
"count": 1,
"self": 0.04720359299994925
},
"TrainerController.start_learning": {
"total": 2559.7239542590005,
"count": 1,
"self": 1.882749658960165,
"children": {
"TrainerController._reset_env": {
"total": 4.638096714999847,
"count": 1,
"self": 4.638096714999847
},
"TrainerController.advance": {
"total": 2553.0921184380404,
"count": 63722,
"self": 1.8439538590787379,
"children": {
"env_step": {
"total": 1838.0413715179686,
"count": 63722,
"self": 1700.759900933833,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.1661480230805,
"count": 63722,
"self": 5.781753429050923,
"children": {
"TorchPolicy.evaluate": {
"total": 130.38439459402957,
"count": 62551,
"self": 130.38439459402957
}
}
},
"workers": {
"total": 1.115322561055109,
"count": 63722,
"self": 0.0,
"children": {
"worker_root": {
"total": 2553.02942234395,
"count": 63722,
"is_parallel": true,
"self": 991.5787320539471,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019035689999782335,
"count": 1,
"is_parallel": true,
"self": 0.0005064549998223811,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013971140001558524,
"count": 8,
"is_parallel": true,
"self": 0.0013971140001558524
}
}
},
"UnityEnvironment.step": {
"total": 0.058730231999788884,
"count": 1,
"is_parallel": true,
"self": 0.0005944239999280398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005482120000124269,
"count": 1,
"is_parallel": true,
"self": 0.0005482120000124269
},
"communicator.exchange": {
"total": 0.055692323999892324,
"count": 1,
"is_parallel": true,
"self": 0.055692323999892324
},
"steps_from_proto": {
"total": 0.001895271999956094,
"count": 1,
"is_parallel": true,
"self": 0.00038874599954397127,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015065260004121228,
"count": 8,
"is_parallel": true,
"self": 0.0015065260004121228
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1561.450690290003,
"count": 63721,
"is_parallel": true,
"self": 37.1608397589614,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.69688357801988,
"count": 63721,
"is_parallel": true,
"self": 25.69688357801988
},
"communicator.exchange": {
"total": 1383.638133103052,
"count": 63721,
"is_parallel": true,
"self": 1383.638133103052
},
"steps_from_proto": {
"total": 114.95483384996965,
"count": 63721,
"is_parallel": true,
"self": 24.424521113055334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.53031273691431,
"count": 509768,
"is_parallel": true,
"self": 90.53031273691431
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 713.206793060993,
"count": 63722,
"self": 3.2524882970058115,
"children": {
"process_trajectory": {
"total": 120.4493139199908,
"count": 63722,
"self": 120.22319856399099,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22611535599980925,
"count": 2,
"self": 0.22611535599980925
}
}
},
"_update_policy": {
"total": 589.5049908439964,
"count": 439,
"self": 382.1550882140373,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.34990262995916,
"count": 22839,
"self": 207.34990262995916
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.160000010713702e-06,
"count": 1,
"self": 1.160000010713702e-06
},
"TrainerController._save_models": {
"total": 0.1109882870000547,
"count": 1,
"self": 0.0017257449999306118,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10926254200012409,
"count": 1,
"self": 0.10926254200012409
}
}
}
}
}
}
}
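
Note: the JSON above is an ML-Agents "timers.json" run log. The "gauges" block records summary statistics (last "value", "min", "max", "count") for each training metric, and the nested timer tree under "children" breaks wall-clock time down by subsystem ("total" / "count" / "self" seconds per block). Below is a minimal sketch of how such a log could be inspected with only the Python standard library; the relative path run_logs/timers.json is an assumption based on this repo's layout, and the helper name walk is illustrative, not part of ML-Agents.

import json

# Load the run log (path assumed from this repo's layout; adjust as needed).
with open("run_logs/timers.json") as f:
    log = json.load(f)

# Each gauge stores the final value plus min/max/count over the run.
for name, stats in log["gauges"].items():
    print(f"{name}: value={stats['value']:.4g} "
          f"(min={stats['min']:.4g}, max={stats['max']:.4g}, n={stats['count']})")

def walk(node, name="root", depth=0):
    """Recursively print the timer tree: total seconds and call count per block."""
    print(f"{'  ' * depth}{name}: {node['total']:.2f}s (count={node['count']})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

# The top-level object itself carries total/count/self, so it serves as the root node.
walk(log)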