{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5708813071250916,
"min": 0.5655150413513184,
"max": 1.4859533309936523,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17099.037109375,
"min": 16977.06640625,
"max": 45077.87890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06450152397155762,
"min": -0.11216867715120316,
"max": -0.03278736397624016,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.609369277954102,
"min": -26.920482635498047,
"max": -7.770605087280273,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01114608719944954,
"min": 0.0067435503005981445,
"max": 0.18880322575569153,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.6973531246185303,
"min": 1.6386827230453491,
"max": 45.312774658203125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06820288197770362,
"min": 0.06445500577475677,
"max": 0.07252554854213991,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9548403476878508,
"min": 0.4788092781673186,
"max": 1.033808975441692,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0019665540029339813,
"min": 5.042557388877481e-05,
"max": 0.0049891224837221955,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.027531756041075738,
"min": 0.0005546813127765229,
"max": 0.05715166313196581,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4364975211999985e-06,
"min": 7.4364975211999985e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010411096529679998,
"min": 0.00010411096529679998,
"max": 0.002900874233042,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024788,
"min": 0.1024788,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347032,
"min": 1.327104,
"max": 2.166958,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025763211999999994,
"min": 0.00025763211999999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003606849679999999,
"min": 0.003606849679999999,
"max": 0.0967191042,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010786646977066994,
"min": 0.010786646977066994,
"max": 0.2914198935031891,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1510130614042282,
"min": 0.1510130614042282,
"max": 2.0399391651153564,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 969.0645161290323,
"min": 858.4193548387096,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30041.0,
"min": 15984.0,
"max": 32456.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.776090374156352,
"min": -1.0000000521540642,
"max": -0.3427355195726118,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -24.058801598846912,
"min": -32.000001668930054,
"max": -10.624801106750965,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.776090374156352,
"min": -1.0000000521540642,
"max": -0.3427355195726118,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -24.058801598846912,
"min": -32.000001668930054,
"max": -10.624801106750965,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10775177314456913,
"min": 0.10153135738801211,
"max": 5.862117176875472,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.340304967481643,
"min": 3.1965397249441594,
"max": 93.79387483000755,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682382792",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682384833"
},
"total": 2041.520196816,
"count": 1,
"self": 0.8555173459999423,
"children": {
"run_training.setup": {
"total": 0.11233643200012011,
"count": 1,
"self": 0.11233643200012011
},
"TrainerController.start_learning": {
"total": 2040.552343038,
"count": 1,
"self": 1.4334295900398502,
"children": {
"TrainerController._reset_env": {
"total": 3.979769903000033,
"count": 1,
"self": 3.979769903000033
},
"TrainerController.advance": {
"total": 2034.9785624699603,
"count": 62979,
"self": 1.5554788879740045,
"children": {
"env_step": {
"total": 1408.948517668032,
"count": 62979,
"self": 1295.4697900840447,
"children": {
"SubprocessEnvManager._take_step": {
"total": 112.59227783697634,
"count": 62979,
"self": 5.209899514972221,
"children": {
"TorchPolicy.evaluate": {
"total": 107.38237832200412,
"count": 62554,
"self": 107.38237832200412
}
}
},
"workers": {
"total": 0.8864497470110564,
"count": 62979,
"self": 0.0,
"children": {
"worker_root": {
"total": 2035.039257309963,
"count": 62979,
"is_parallel": true,
"self": 857.4642484309554,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004779885000061768,
"count": 1,
"is_parallel": true,
"self": 0.003313747999982297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014661370000794705,
"count": 8,
"is_parallel": true,
"self": 0.0014661370000794705
}
}
},
"UnityEnvironment.step": {
"total": 0.04782473199998094,
"count": 1,
"is_parallel": true,
"self": 0.0006112989999564888,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005176990000563819,
"count": 1,
"is_parallel": true,
"self": 0.0005176990000563819
},
"communicator.exchange": {
"total": 0.044868495999935476,
"count": 1,
"is_parallel": true,
"self": 0.044868495999935476
},
"steps_from_proto": {
"total": 0.0018272380000325938,
"count": 1,
"is_parallel": true,
"self": 0.00040979000004881527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014174479999837786,
"count": 8,
"is_parallel": true,
"self": 0.0014174479999837786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1177.5750088790076,
"count": 62978,
"is_parallel": true,
"self": 34.22461183014843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.94166392997522,
"count": 62978,
"is_parallel": true,
"self": 25.94166392997522
},
"communicator.exchange": {
"total": 1014.62582821093,
"count": 62978,
"is_parallel": true,
"self": 1014.62582821093
},
"steps_from_proto": {
"total": 102.78290490795393,
"count": 62978,
"is_parallel": true,
"self": 21.943200839087922,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.83970406886601,
"count": 503824,
"is_parallel": true,
"self": 80.83970406886601
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 624.4745659139542,
"count": 62979,
"self": 2.315489458949287,
"children": {
"process_trajectory": {
"total": 109.74366404500597,
"count": 62979,
"self": 109.4614732710063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2821907739996732,
"count": 2,
"self": 0.2821907739996732
}
}
},
"_update_policy": {
"total": 512.4154124099989,
"count": 410,
"self": 329.06317192099027,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.35224048900864,
"count": 22908,
"self": 183.35224048900864
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4709999049955513e-06,
"count": 1,
"self": 1.4709999049955513e-06
},
"TrainerController._save_models": {
"total": 0.1605796039998495,
"count": 1,
"self": 0.0021674390000043786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15841216499984512,
"count": 1,
"self": 0.15841216499984512
}
}
}
}
}
}
}