{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.24701941013336182,
"min": 0.22370147705078125,
"max": 0.34114405512809753,
"count": 22
},
"Pyramids.Policy.Entropy.sum": {
"value": 7398.7255859375,
"min": 6718.20263671875,
"max": 10327.11328125,
"count": 22
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 245.140625,
"min": 245.140625,
"max": 327.10526315789474,
"count": 22
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31378.0,
"min": 17514.0,
"max": 31697.0,
"count": 22
},
"Pyramids.Step.mean": {
"value": 1649913.0,
"min": 1019918.0,
"max": 1649913.0,
"count": 22
},
"Pyramids.Step.sum": {
"value": 1649913.0,
"min": 1019918.0,
"max": 1649913.0,
"count": 22
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8134933114051819,
"min": 0.5966002345085144,
"max": 0.8134933114051819,
"count": 22
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 237.5400390625,
"min": 128.40707397460938,
"max": 237.5400390625,
"count": 22
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.010810206644237041,
"min": -0.010810206644237041,
"max": 0.02211012877523899,
"count": 22
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.1565804481506348,
"min": -3.1565804481506348,
"max": 6.235056400299072,
"count": 22
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7241364187279413,
"min": 1.5469494552204484,
"max": 1.7244504210741625,
"count": 22
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 222.41359801590443,
"min": 114.48539961874485,
"max": 222.41359801590443,
"count": 22
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7241364187279413,
"min": 1.5469494552204484,
"max": 1.7244504210741625,
"count": 22
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 222.41359801590443,
"min": 114.48539961874485,
"max": 222.41359801590443,
"count": 22
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02222682206696098,
"min": 0.02222682206696098,
"max": 0.033547179050153565,
"count": 22
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8672600466379663,
"min": 1.6793347745988285,
"max": 3.316833586024586,
"count": 22
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07097127216761959,
"min": 0.06550184718696941,
"max": 0.0729533370778275,
"count": 22
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9935978103466742,
"min": 0.6187000768487434,
"max": 1.0943000561674125,
"count": 22
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014025119535978141,
"min": 0.01275076393808593,
"max": 0.016184358942296563,
"count": 22
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19635167350369398,
"min": 0.142480781422266,
"max": 0.24276538413444845,
"count": 22
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00013652466877750956,
"min": 0.00013652466877750956,
"max": 0.00019894063368646668,
"count": 22
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.001911345362885134,
"min": 0.0017904657031782001,
"max": 0.0027679143773620667,
"count": 22
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14550820476190476,
"min": 0.14550820476190476,
"max": 0.16631353333333332,
"count": 22
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0371148666666667,
"min": 1.4968218,
"max": 2.422637933333333,
"count": 22
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004556269655714285,
"min": 0.004556269655714285,
"max": 0.00663472198,
"count": 22
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06378777517999999,
"min": 0.05971249782,
"max": 0.09232152953999999,
"count": 22
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008655091747641563,
"min": 0.008655091747641563,
"max": 0.010226244106888771,
"count": 22
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12117128074169159,
"min": 0.08283895254135132,
"max": 0.15339365601539612,
"count": 22
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719039194",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719040852"
},
"total": 1658.0656046610002,
"count": 1,
"self": 0.3311192339997433,
"children": {
"run_training.setup": {
"total": 0.05588220699974045,
"count": 1,
"self": 0.05588220699974045
},
"TrainerController.start_learning": {
"total": 1657.6786032200007,
"count": 1,
"self": 0.97211951002555,
"children": {
"TrainerController._reset_env": {
"total": 2.320944727999631,
"count": 1,
"self": 2.320944727999631
},
"TrainerController.advance": {
"total": 1654.2426297039756,
"count": 43570,
"self": 1.0577510939820058,
"children": {
"env_step": {
"total": 1222.0328911869574,
"count": 43570,
"self": 1130.7672467770462,
"children": {
"SubprocessEnvManager._take_step": {
"total": 90.65485426594023,
"count": 43570,
"self": 3.1559880768672883,
"children": {
"TorchPolicy.evaluate": {
"total": 87.49886618907294,
"count": 41715,
"self": 87.49886618907294
}
}
},
"workers": {
"total": 0.6107901439709167,
"count": 43569,
"self": 0.0,
"children": {
"worker_root": {
"total": 1654.05774715205,
"count": 43569,
"is_parallel": true,
"self": 608.3945897670465,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002053033999800391,
"count": 1,
"is_parallel": true,
"self": 0.0006524050004372839,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014006289993631071,
"count": 8,
"is_parallel": true,
"self": 0.0014006289993631071
}
}
},
"UnityEnvironment.step": {
"total": 0.04781640600049286,
"count": 1,
"is_parallel": true,
"self": 0.0006432730006054044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004788030000781873,
"count": 1,
"is_parallel": true,
"self": 0.0004788030000781873
},
"communicator.exchange": {
"total": 0.045015523999609286,
"count": 1,
"is_parallel": true,
"self": 0.045015523999609286
},
"steps_from_proto": {
"total": 0.0016788060001999838,
"count": 1,
"is_parallel": true,
"self": 0.00034541300101409433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013333929991858895,
"count": 8,
"is_parallel": true,
"self": 0.0013333929991858895
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1045.6631573850036,
"count": 43568,
"is_parallel": true,
"self": 23.285176843111003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.537738832067589,
"count": 43568,
"is_parallel": true,
"self": 15.537738832067589
},
"communicator.exchange": {
"total": 941.4118091279843,
"count": 43568,
"is_parallel": true,
"self": 941.4118091279843
},
"steps_from_proto": {
"total": 65.42843258184075,
"count": 43568,
"is_parallel": true,
"self": 13.649580882759437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.778851699081315,
"count": 348544,
"is_parallel": true,
"self": 51.778851699081315
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 431.1519874230362,
"count": 43569,
"self": 2.00465622304273,
"children": {
"process_trajectory": {
"total": 88.9402615469944,
"count": 43569,
"self": 88.73077637299411,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20948517400029232,
"count": 2,
"self": 0.20948517400029232
}
}
},
"_update_policy": {
"total": 340.20706965299905,
"count": 315,
"self": 199.04264628404417,
"children": {
"TorchPPOOptimizer.update": {
"total": 141.16442336895489,
"count": 15153,
"self": 141.16442336895489
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.201999788871035e-06,
"count": 1,
"self": 3.201999788871035e-06
},
"TrainerController._save_models": {
"total": 0.14290607600014482,
"count": 1,
"self": 0.00190373500026908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14100234099987574,
"count": 1,
"self": 0.14100234099987574
}
}
}
}
}
}
}