ppo-Pyramids-T1 / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.752404510974884,
"min": 0.7446481585502625,
"max": 1.4475173950195312,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22475.828125,
"min": 22220.30078125,
"max": 43911.88671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.39320582151412964,
"min": -0.1025870218873024,
"max": 0.4089769124984741,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.16557312011719,
"min": -24.723472595214844,
"max": 110.42376708984375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.012855459935963154,
"min": -0.03745002672076225,
"max": 0.3895516097545624,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.4709742069244385,
"min": -9.774456977844238,
"max": 92.32373046875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06731963285989147,
"min": 0.0657065052671667,
"max": 0.070955144798777,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.009794492898372,
"min": 0.48059778338115555,
"max": 1.0360445273886025,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01494436897462412,
"min": 0.000490965161048273,
"max": 0.01494436897462412,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2241655346193618,
"min": 0.0049096516104827305,
"max": 0.2241655346193618,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.48501751508e-06,
"min": 2.48501751508e-06,
"max": 9.838354447360002e-05,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.7275262726200006e-05,
"min": 3.7275262726200006e-05,
"max": 0.0010851785148216998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248492,
"min": 0.10248492,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372738,
"min": 1.3886848,
"max": 2.4840030000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025824350800000004,
"min": 0.00025824350800000004,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003873652620000001,
"min": 0.003873652620000001,
"max": 0.10853931216999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008831266313791275,
"min": 0.008831266313791275,
"max": 0.3580308258533478,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13246899843215942,
"min": 0.12596070766448975,
"max": 2.506215810775757,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 444.5147058823529,
"min": 435.015873015873,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30227.0,
"min": 15984.0,
"max": 33629.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.378970569547485,
"min": -1.0000000521540642,
"max": 1.408266638834325,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 93.76999872922897,
"min": -31.99920167028904,
"max": 93.76999872922897,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.378970569547485,
"min": -1.0000000521540642,
"max": 1.408266638834325,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 93.76999872922897,
"min": -31.99920167028904,
"max": 93.76999872922897,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04124646667621752,
"min": 0.04124646667621752,
"max": 8.720934831537306,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8047597339827917,
"min": 2.8047597339827917,
"max": 139.5349573045969,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705114713",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705118565"
},
"total": 3852.379509343,
"count": 1,
"self": 0.7309545810003328,
"children": {
"run_training.setup": {
"total": 0.09398214199973154,
"count": 1,
"self": 0.09398214199973154
},
"TrainerController.start_learning": {
"total": 3851.55457262,
"count": 1,
"self": 2.4573126248706103,
"children": {
"TrainerController._reset_env": {
"total": 2.999534611000854,
"count": 1,
"self": 2.999534611000854
},
"TrainerController.advance": {
"total": 3846.004651636127,
"count": 63481,
"self": 2.8149224512944784,
"children": {
"env_step": {
"total": 2138.822948975987,
"count": 63481,
"self": 1962.8046567361998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 174.36495148488757,
"count": 63481,
"self": 6.9447794237712515,
"children": {
"TorchPolicy.evaluate": {
"total": 167.42017206111632,
"count": 62575,
"self": 167.42017206111632
}
}
},
"workers": {
"total": 1.6533407548995456,
"count": 63481,
"self": 0.0,
"children": {
"worker_root": {
"total": 3844.9061464941024,
"count": 63481,
"is_parallel": true,
"self": 2072.589791339935,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003106320000370033,
"count": 1,
"is_parallel": true,
"self": 0.0012031769983877894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019031430019822437,
"count": 8,
"is_parallel": true,
"self": 0.0019031430019822437
}
}
},
"UnityEnvironment.step": {
"total": 0.0873848370010819,
"count": 1,
"is_parallel": true,
"self": 0.0007657940004719421,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005388680001487955,
"count": 1,
"is_parallel": true,
"self": 0.0005388680001487955
},
"communicator.exchange": {
"total": 0.08392997200098762,
"count": 1,
"is_parallel": true,
"self": 0.08392997200098762
},
"steps_from_proto": {
"total": 0.0021502029994735494,
"count": 1,
"is_parallel": true,
"self": 0.00044258400339458603,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017076189960789634,
"count": 8,
"is_parallel": true,
"self": 0.0017076189960789634
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1772.3163551541675,
"count": 63480,
"is_parallel": true,
"self": 49.35308143341172,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.064904663837297,
"count": 63480,
"is_parallel": true,
"self": 28.064904663837297
},
"communicator.exchange": {
"total": 1560.754516314113,
"count": 63480,
"is_parallel": true,
"self": 1560.754516314113
},
"steps_from_proto": {
"total": 134.1438527428054,
"count": 63480,
"is_parallel": true,
"self": 29.099854613930802,
"children": {
"_process_rank_one_or_two_observation": {
"total": 105.04399812887459,
"count": 507840,
"is_parallel": true,
"self": 105.04399812887459
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1704.3667802088457,
"count": 63481,
"self": 5.318831624765153,
"children": {
"process_trajectory": {
"total": 176.11431703810194,
"count": 63481,
"self": 175.86328792510176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25102911300018604,
"count": 2,
"self": 0.25102911300018604
}
}
},
"_update_policy": {
"total": 1522.9336315459786,
"count": 444,
"self": 627.9782676730065,
"children": {
"TorchPPOOptimizer.update": {
"total": 894.9553638729722,
"count": 38010,
"self": 894.9553638729722
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0250005288980901e-06,
"count": 1,
"self": 1.0250005288980901e-06
},
"TrainerController._save_models": {
"total": 0.09307272300065961,
"count": 1,
"self": 0.002401848001682083,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09067087499897752,
"count": 1,
"self": 0.09067087499897752
}
}
}
}
}
}
}
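
Since this is the raw timers.json that ML-Agents writes at the end of a run, a short script makes the gauge summaries and the timer hierarchy easier to scan than the nested JSON. The sketch below is illustrative and not part of the run artifacts: the file path is an assumption, and the field layout (value/min/max/count for gauges; total/self/count/children for timer nodes) is taken directly from the JSON above.

```python
import json

# Assumed location of the log; adjust to wherever this run's
# timers.json actually lives.
PATH = "run_logs/timers.json"

with open(PATH) as f:
    root = json.load(f)

# Each gauge records a metric's latest value plus its min/max
# across `count` summary writes.
print("=== Gauges ===")
for name, g in root["gauges"].items():
    print(f"{name}: value={g['value']:.6g} "
          f"(min={g['min']:.6g}, max={g['max']:.6g}, n={g['count']})")

# Timer nodes form a tree: `total` is wall-clock seconds spent in the
# node including its children, `self` excludes children, and `count`
# is the number of times the node was entered.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: "
          f"{node.get('total', 0.0):.3f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

print("\n=== Timer tree ===")
walk(root)
```

Comparing `total` against `self` per node shows where the wall-clock time went: in this run, communicator.exchange under the parallel UnityEnvironment.step accounts for roughly 1560 s of the 3852 s total, which is typical when the Unity executable runs as a separate process.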