{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5235576629638672,
"min": 0.5086338520050049,
"max": 1.471824288368225,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15756.9921875,
"min": 15259.015625,
"max": 44649.26171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989967.0,
"min": 29879.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989967.0,
"min": 29879.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.46829134225845337,
"min": -0.1322522759437561,
"max": 0.5012826323509216,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 125.50208282470703,
"min": -31.34379005432129,
"max": 137.85272216796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02718088962137699,
"min": 0.007172916084527969,
"max": 0.6062001585960388,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.284478187561035,
"min": 1.9079957008361816,
"max": 143.66943359375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06456502532195632,
"min": 0.06456502532195632,
"max": 0.07355335303111046,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9039103545073884,
"min": 0.4990554301428807,
"max": 1.0944907090115343,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.030393895399313243,
"min": 4.955724833027785e-05,
"max": 0.030393895399313243,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.4255145355903854,
"min": 0.0006442442282936121,
"max": 0.4255145355903854,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.5361597463849987e-05,
"min": 2.5361597463849987e-05,
"max": 0.0009841201444451284,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0003550623644938998,
"min": 0.0003550623644938998,
"max": 0.012026773097322703,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253614999999996,
"min": 0.10253614999999996,
"max": 0.19841201428571426,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355060999999996,
"min": 1.3888840999999998,
"max": 2.5696076999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026336138499999997,
"min": 0.00026336138499999997,
"max": 0.009841360227142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036870593899999992,
"min": 0.0036870593899999992,
"max": 0.12027746227,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0144507372751832,
"min": 0.0144507372751832,
"max": 0.6418731808662415,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20231032371520996,
"min": 0.20231032371520996,
"max": 4.493112087249756,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 406.2972972972973,
"min": 388.48,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30066.0,
"min": 16678.0,
"max": 33458.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4357653145988782,
"min": -0.9998200515906016,
"max": 1.5562683261836632,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.68239859491587,
"min": -29.994601547718048,
"max": 122.94519776850939,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4357653145988782,
"min": -0.9998200515906016,
"max": 1.5562683261836632,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.68239859491587,
"min": -29.994601547718048,
"max": 122.94519776850939,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06058847799198702,
"min": 0.06058847799198702,
"max": 12.15372983322424,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.5441358493990265,
"min": 4.471032229572302,
"max": 206.6134071648121,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730783201",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730785472"
},
"total": 2271.340723722,
"count": 1,
"self": 0.44032952400038994,
"children": {
"run_training.setup": {
"total": 0.11492714499991052,
"count": 1,
"self": 0.11492714499991052
},
"TrainerController.start_learning": {
"total": 2270.7854670529996,
"count": 1,
"self": 1.4296400390885537,
"children": {
"TrainerController._reset_env": {
"total": 3.7495636180001384,
"count": 1,
"self": 3.7495636180001384
},
"TrainerController.advance": {
"total": 2265.523913978912,
"count": 63683,
"self": 1.4994117518417625,
"children": {
"env_step": {
"total": 1552.9951192920425,
"count": 63683,
"self": 1393.615920595099,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.4936713009456,
"count": 63683,
"self": 4.91421089798996,
"children": {
"TorchPolicy.evaluate": {
"total": 153.57946040295565,
"count": 62573,
"self": 153.57946040295565
}
}
},
"workers": {
"total": 0.8855273959979968,
"count": 63683,
"self": 0.0,
"children": {
"worker_root": {
"total": 2265.316386705951,
"count": 63683,
"is_parallel": true,
"self": 995.6431665069981,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004318263000186562,
"count": 1,
"is_parallel": true,
"self": 0.002080732999729662,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022375300004569,
"count": 8,
"is_parallel": true,
"self": 0.0022375300004569
}
}
},
"UnityEnvironment.step": {
"total": 0.056692501999805245,
"count": 1,
"is_parallel": true,
"self": 0.0007164479998209572,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005009830001654336,
"count": 1,
"is_parallel": true,
"self": 0.0005009830001654336
},
"communicator.exchange": {
"total": 0.05361754099976679,
"count": 1,
"is_parallel": true,
"self": 0.05361754099976679
},
"steps_from_proto": {
"total": 0.0018575300000520656,
"count": 1,
"is_parallel": true,
"self": 0.00040857500016500126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014489549998870643,
"count": 8,
"is_parallel": true,
"self": 0.0014489549998870643
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1269.673220198953,
"count": 63682,
"is_parallel": true,
"self": 34.519384733991046,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.237258836918045,
"count": 63682,
"is_parallel": true,
"self": 24.237258836918045
},
"communicator.exchange": {
"total": 1107.7985341170397,
"count": 63682,
"is_parallel": true,
"self": 1107.7985341170397
},
"steps_from_proto": {
"total": 103.1180425110042,
"count": 63682,
"is_parallel": true,
"self": 21.285903256972233,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.83213925403197,
"count": 509456,
"is_parallel": true,
"self": 81.83213925403197
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 711.0293829350276,
"count": 63683,
"self": 2.770359816038308,
"children": {
"process_trajectory": {
"total": 139.30413107699133,
"count": 63683,
"self": 139.0764808019917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22765027499963253,
"count": 2,
"self": 0.22765027499963253
}
}
},
"_update_policy": {
"total": 568.9548920419979,
"count": 453,
"self": 319.88969404503814,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.0651979969598,
"count": 22752,
"self": 249.0651979969598
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1089996405644342e-06,
"count": 1,
"self": 1.1089996405644342e-06
},
"TrainerController._save_models": {
"total": 0.08234830799938209,
"count": 1,
"self": 0.002007665999371966,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08034064200001012,
"count": 1,
"self": 0.08034064200001012
}
}
}
}
}
}
}