ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4071084260940552,
"min": 1.4071084260940552,
"max": 1.4288718700408936,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69084.8046875,
"min": 69082.8125,
"max": 76476.1015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.9469320066335,
"min": 81.25082508250826,
"max": 399.96,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49414.0,
"min": 48741.0,
"max": 50066.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49425.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49425.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.426374673843384,
"min": 0.06314229965209961,
"max": 2.455024003982544,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1463.1038818359375,
"min": 7.829645156860352,
"max": 1471.4853515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7479341614305675,
"min": 1.8530219452035042,
"max": 3.934352233312851,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2260.0042993426323,
"min": 229.77472120523453,
"max": 2299.499041080475,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7479341614305675,
"min": 1.8530219452035042,
"max": 3.934352233312851,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2260.0042993426323,
"min": 229.77472120523453,
"max": 2299.499041080475,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01730710079233783,
"min": 0.012825166863492795,
"max": 0.019413210502655903,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05192130237701349,
"min": 0.02906668376381276,
"max": 0.05490471239609178,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056537937331530785,
"min": 0.022325898396472137,
"max": 0.06261863364941543,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16961381199459236,
"min": 0.04465179679294427,
"max": 0.1878559009482463,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.260548913183336e-06,
"min": 3.260548913183336e-06,
"max": 0.000295365826544725,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.781646739550008e-06,
"min": 9.781646739550008e-06,
"max": 0.0008440779186407,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108681666666669,
"min": 0.10108681666666669,
"max": 0.19845527500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30326045000000007,
"min": 0.20730475000000004,
"max": 0.5813593,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.423215166666671e-05,
"min": 6.423215166666671e-05,
"max": 0.0049229182225,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019269645500000015,
"min": 0.00019269645500000015,
"max": 0.014069829070000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671775473",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671777692"
},
"total": 2218.8483475389994,
"count": 1,
"self": 0.3925816609994399,
"children": {
"run_training.setup": {
"total": 0.11553631600008885,
"count": 1,
"self": 0.11553631600008885
},
"TrainerController.start_learning": {
"total": 2218.340229562,
"count": 1,
"self": 3.6161656351932834,
"children": {
"TrainerController._reset_env": {
"total": 8.394557968999834,
"count": 1,
"self": 8.394557968999834
},
"TrainerController.advance": {
"total": 2206.209057899807,
"count": 232565,
"self": 4.059504177940653,
"children": {
"env_step": {
"total": 1735.0459788339222,
"count": 232565,
"self": 1457.3187131627728,
"children": {
"SubprocessEnvManager._take_step": {
"total": 275.2210696911284,
"count": 232565,
"self": 14.531102265051231,
"children": {
"TorchPolicy.evaluate": {
"total": 260.68996742607715,
"count": 222857,
"self": 64.53581327096117,
"children": {
"TorchPolicy.sample_actions": {
"total": 196.15415415511598,
"count": 222857,
"self": 196.15415415511598
}
}
}
}
},
"workers": {
"total": 2.506195980020948,
"count": 232565,
"self": 0.0,
"children": {
"worker_root": {
"total": 2210.532440753062,
"count": 232565,
"is_parallel": true,
"self": 1006.2722515139617,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023360509999292844,
"count": 1,
"is_parallel": true,
"self": 0.00031203400021695415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020240169997123303,
"count": 2,
"is_parallel": true,
"self": 0.0020240169997123303
}
}
},
"UnityEnvironment.step": {
"total": 0.026873391000208358,
"count": 1,
"is_parallel": true,
"self": 0.00027474000034999335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017139399960797164,
"count": 1,
"is_parallel": true,
"self": 0.00017139399960797164
},
"communicator.exchange": {
"total": 0.025740078000126232,
"count": 1,
"is_parallel": true,
"self": 0.025740078000126232
},
"steps_from_proto": {
"total": 0.0006871790001241607,
"count": 1,
"is_parallel": true,
"self": 0.00023132000023906585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004558589998850948,
"count": 2,
"is_parallel": true,
"self": 0.0004558589998850948
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1204.2601892391003,
"count": 232564,
"is_parallel": true,
"self": 35.05765227601569,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.01451229606982,
"count": 232564,
"is_parallel": true,
"self": 77.01451229606982
},
"communicator.exchange": {
"total": 997.8051067359406,
"count": 232564,
"is_parallel": true,
"self": 997.8051067359406
},
"steps_from_proto": {
"total": 94.38291793107419,
"count": 232564,
"is_parallel": true,
"self": 38.90222101187828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.48069691919591,
"count": 465128,
"is_parallel": true,
"self": 55.48069691919591
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.103574887944,
"count": 232565,
"self": 5.578020810099588,
"children": {
"process_trajectory": {
"total": 149.11544594184352,
"count": 232565,
"self": 147.80089823384287,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3145477080006458,
"count": 10,
"self": 1.3145477080006458
}
}
},
"_update_policy": {
"total": 312.4101081360009,
"count": 97,
"self": 259.05270883799767,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.3573992980032,
"count": 2910,
"self": 53.3573992980032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.047999830916524e-06,
"count": 1,
"self": 1.047999830916524e-06
},
"TrainerController._save_models": {
"total": 0.12044701000013447,
"count": 1,
"self": 0.0019482060006339452,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11849880399950052,
"count": 1,
"self": 0.11849880399950052
}
}
}
}
}
}
}
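This file is the `timers.json` written by an ML-Agents PPO run on the Huggy environment (see the `mlagents-learn` command and version fields in the `metadata` block). Below is a minimal sketch, not part of the run artifacts, for inspecting it: it assumes the JSON above is saved locally as `run_logs/timers.json`, reads the `gauges` metrics, and walks the nested timer tree; the helper name `walk` is illustrative, not an ML-Agents API.

```python
import json

# Minimal sketch (assumption: the JSON above is saved as run_logs/timers.json).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds training metrics with their final value and min/max over the run.
for name, stats in timers["gauges"].items():
    print(f"{name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f})")

# The rest of the file is a nested timer tree: "total" is wall-clock seconds,
# "count" is the number of calls, and "self" is time not attributed to children.
def walk(node, name="root", depth=0):
    print("  " * depth + f"{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```

Running the sketch against this file would show, for example, that of the roughly 2219 s total, most wall-clock time sits under `TrainerController.start_learning` -> `env_step` (environment stepping and `communicator.exchange`), with `_update_policy` accounting for the bulk of the trainer-side time.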