ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082129001617432,
"min": 1.4082129001617432,
"max": 1.4291261434555054,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71371.046875,
"min": 69300.125,
"max": 76253.6640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.95409181636727,
"min": 87.47610619469026,
"max": 390.3203125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49576.0,
"min": 48866.0,
"max": 50177.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999474.0,
"min": 49825.0,
"max": 1999474.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999474.0,
"min": 49825.0,
"max": 1999474.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3507883548736572,
"min": 0.031364452093839645,
"max": 2.435443639755249,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1180.095703125,
"min": 3.983285427093506,
"max": 1343.17822265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5545885409729414,
"min": 1.856823153148486,
"max": 4.00661034479794,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1784.4034475684166,
"min": 235.8165404498577,
"max": 2107.477041363716,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5545885409729414,
"min": 1.856823153148486,
"max": 4.00661034479794,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1784.4034475684166,
"min": 235.8165404498577,
"max": 2107.477041363716,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01309355038449414,
"min": 0.01309355038449414,
"max": 0.020819643248493475,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03928065115348242,
"min": 0.02776997757367402,
"max": 0.05938962733489461,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056385114085343156,
"min": 0.02281155015031497,
"max": 0.06061395841340224,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16915534225602946,
"min": 0.04562310030062994,
"max": 0.16946651885906855,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5793988069000078e-06,
"min": 3.5793988069000078e-06,
"max": 0.00029531160156279993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0738196420700024e-05,
"min": 1.0738196420700024e-05,
"max": 0.0008441433186189,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119310000000002,
"min": 0.10119310000000002,
"max": 0.19843719999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30357930000000005,
"min": 0.20757015,
"max": 0.5813811,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.953569000000013e-05,
"min": 6.953569000000013e-05,
"max": 0.004922016279999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002086070700000004,
"min": 0.0002086070700000004,
"max": 0.014070916889999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677950363",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677952860"
},
"total": 2496.601962099,
"count": 1,
"self": 0.4394619730001068,
"children": {
"run_training.setup": {
"total": 0.10729066900000817,
"count": 1,
"self": 0.10729066900000817
},
"TrainerController.start_learning": {
"total": 2496.055209457,
"count": 1,
"self": 4.642080394066397,
"children": {
"TrainerController._reset_env": {
"total": 10.068002261000004,
"count": 1,
"self": 10.068002261000004
},
"TrainerController.advance": {
"total": 2481.231852810933,
"count": 232180,
"self": 4.680931108039658,
"children": {
"env_step": {
"total": 1944.6610740329838,
"count": 232180,
"self": 1628.2733638119698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 313.3882499590564,
"count": 232180,
"self": 16.806932282036087,
"children": {
"TorchPolicy.evaluate": {
"total": 296.5813176770203,
"count": 223034,
"self": 74.55629210304346,
"children": {
"TorchPolicy.sample_actions": {
"total": 222.02502557397685,
"count": 223034,
"self": 222.02502557397685
}
}
}
}
},
"workers": {
"total": 2.999460261957722,
"count": 232180,
"self": 0.0,
"children": {
"worker_root": {
"total": 2487.004930325999,
"count": 232180,
"is_parallel": true,
"self": 1171.3629595280352,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010925530000349681,
"count": 1,
"is_parallel": true,
"self": 0.00034714900004928495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007454039999856832,
"count": 2,
"is_parallel": true,
"self": 0.0007454039999856832
}
}
},
"UnityEnvironment.step": {
"total": 0.03139220399998521,
"count": 1,
"is_parallel": true,
"self": 0.0003083609999521286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002218640000251071,
"count": 1,
"is_parallel": true,
"self": 0.0002218640000251071
},
"communicator.exchange": {
"total": 0.027937850999990133,
"count": 1,
"is_parallel": true,
"self": 0.027937850999990133
},
"steps_from_proto": {
"total": 0.002924128000017845,
"count": 1,
"is_parallel": true,
"self": 0.0002935800000045674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026305480000132775,
"count": 2,
"is_parallel": true,
"self": 0.0026305480000132775
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1315.6419707979637,
"count": 232179,
"is_parallel": true,
"self": 39.005549074831606,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.19513040604289,
"count": 232179,
"is_parallel": true,
"self": 83.19513040604289
},
"communicator.exchange": {
"total": 1099.6440867190527,
"count": 232179,
"is_parallel": true,
"self": 1099.6440867190527
},
"steps_from_proto": {
"total": 93.7972045980365,
"count": 232179,
"is_parallel": true,
"self": 39.95259874904582,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.84460584899068,
"count": 464358,
"is_parallel": true,
"self": 53.84460584899068
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 531.8898476699095,
"count": 232180,
"self": 7.131257833973564,
"children": {
"process_trajectory": {
"total": 169.8729961479367,
"count": 232180,
"self": 168.63438251993733,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2386136279993707,
"count": 10,
"self": 1.2386136279993707
}
}
},
"_update_policy": {
"total": 354.8855936879993,
"count": 97,
"self": 297.11033668099213,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.775257007007156,
"count": 2910,
"self": 57.775257007007156
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.340002750628628e-07,
"count": 1,
"self": 8.340002750628628e-07
},
"TrainerController._save_models": {
"total": 0.11327315700009422,
"count": 1,
"self": 0.0023279140000340703,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11094524300006015,
"count": 1,
"self": 0.11094524300006015
}
}
}
}
}
}