{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3410817086696625,
"min": 0.34023937582969666,
"max": 1.47316575050354,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10254.2802734375,
"min": 10185.40625,
"max": 44689.95703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989943.0,
"min": 29931.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989943.0,
"min": 29931.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6495689153671265,
"min": -0.1056307703256607,
"max": 0.6508188247680664,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 182.52886962890625,
"min": -25.35138511657715,
"max": 184.83255004882812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0010810631792992353,
"min": -0.008225778117775917,
"max": 0.49512872099876404,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.3037787675857544,
"min": -2.336121082305908,
"max": 117.34550476074219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06575939865918674,
"min": 0.06575939865918674,
"max": 0.07441798879807315,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9206315812286143,
"min": 0.5209259215865121,
"max": 1.0442729147616774,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0160100356144886,
"min": 0.0005157705674715772,
"max": 0.0160100356144886,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2241404986028404,
"min": 0.00567347624218735,
"max": 0.2346591379949435,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.0182983168564294e-05,
"min": 1.0182983168564294e-05,
"max": 0.0003935341730450286,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00014256176435990011,
"min": 0.00014256176435990011,
"max": 0.004678856630285901,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254572142857142,
"min": 0.10254572142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356400999999999,
"min": 1.3886848,
"max": 2.5697141,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026431757071428597,
"min": 0.00026431757071428597,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037004459900000035,
"min": 0.0037004459900000035,
"max": 0.11699443859,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013475646264851093,
"min": 0.013475646264851093,
"max": 0.4892297387123108,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18865904211997986,
"min": 0.18865904211997986,
"max": 3.4246082305908203,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 288.19607843137254,
"min": 288.19607843137254,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29396.0,
"min": 16858.0,
"max": 32915.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6918601758271745,
"min": -0.9999500522390008,
"max": 1.706809981316328,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 174.26159811019897,
"min": -31.998401671648026,
"max": 174.26159811019897,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6918601758271745,
"min": -0.9999500522390008,
"max": 1.706809981316328,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 174.26159811019897,
"min": -31.998401671648026,
"max": 174.26159811019897,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04020051931390466,
"min": 0.04020051931390466,
"max": 9.45686684285893,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.1406534893321805,
"min": 4.064705326090916,
"max": 160.76673632860184,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681979319",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681982783"
},
"total": 3463.943395468,
"count": 1,
"self": 0.5973215210001399,
"children": {
"run_training.setup": {
"total": 0.14047379500016177,
"count": 1,
"self": 0.14047379500016177
},
"TrainerController.start_learning": {
"total": 3463.2056001519995,
"count": 1,
"self": 2.424343712962127,
"children": {
"TrainerController._reset_env": {
"total": 1.1377898390001064,
"count": 1,
"self": 1.1377898390001064
},
"TrainerController.advance": {
"total": 3459.5216198160374,
"count": 63869,
"self": 2.6314636320348654,
"children": {
"env_step": {
"total": 2311.939881856025,
"count": 63869,
"self": 2168.8915267690672,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.58296216402277,
"count": 63869,
"self": 7.445506512946849,
"children": {
"TorchPolicy.evaluate": {
"total": 134.13745565107592,
"count": 62576,
"self": 134.13745565107592
}
}
},
"workers": {
"total": 1.465392922934825,
"count": 63869,
"self": 0.0,
"children": {
"worker_root": {
"total": 3455.9085463850847,
"count": 63869,
"is_parallel": true,
"self": 1461.038977868101,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029145660000722273,
"count": 1,
"is_parallel": true,
"self": 0.0009469569999964733,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001967609000075754,
"count": 8,
"is_parallel": true,
"self": 0.001967609000075754
}
}
},
"UnityEnvironment.step": {
"total": 0.10886822900010884,
"count": 1,
"is_parallel": true,
"self": 0.0006587930001842324,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006102540000938461,
"count": 1,
"is_parallel": true,
"self": 0.0006102540000938461
},
"communicator.exchange": {
"total": 0.10560317999988911,
"count": 1,
"is_parallel": true,
"self": 0.10560317999988911
},
"steps_from_proto": {
"total": 0.0019960019999416545,
"count": 1,
"is_parallel": true,
"self": 0.0004846179999731248,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015113839999685297,
"count": 8,
"is_parallel": true,
"self": 0.0015113839999685297
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1994.8695685169837,
"count": 63868,
"is_parallel": true,
"self": 45.98950803900834,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.437251301027118,
"count": 63868,
"is_parallel": true,
"self": 27.437251301027118
},
"communicator.exchange": {
"total": 1788.7070102210332,
"count": 63868,
"is_parallel": true,
"self": 1788.7070102210332
},
"steps_from_proto": {
"total": 132.73579895591502,
"count": 63868,
"is_parallel": true,
"self": 29.902705926916497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.83309302899852,
"count": 510944,
"is_parallel": true,
"self": 102.83309302899852
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1144.9502743279777,
"count": 63869,
"self": 4.63729208108316,
"children": {
"process_trajectory": {
"total": 149.0316192759028,
"count": 63869,
"self": 148.79804945590286,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23356981999995696,
"count": 2,
"self": 0.23356981999995696
}
}
},
"_update_policy": {
"total": 991.2813629709917,
"count": 452,
"self": 414.166394952892,
"children": {
"TorchPPOOptimizer.update": {
"total": 577.1149680180997,
"count": 22806,
"self": 577.1149680180997
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1579995771171525e-06,
"count": 1,
"self": 1.1579995771171525e-06
},
"TrainerController._save_models": {
"total": 0.12184562600032223,
"count": 1,
"self": 0.001737318000778032,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1201083079995442,
"count": 1,
"self": 0.1201083079995442
}
}
}
}
}
}
}