{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4053775072097778,
"min": 1.4053775072097778,
"max": 1.426833152770996,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71197.828125,
"min": 68470.234375,
"max": 79067.2734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.12857142857143,
"min": 85.17555938037866,
"max": 400.456,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49352.0,
"min": 48886.0,
"max": 50318.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999971.0,
"min": 49432.0,
"max": 1999971.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999971.0,
"min": 49432.0,
"max": 1999971.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.359799861907959,
"min": 0.035762205719947815,
"max": 2.4518790245056152,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1321.4879150390625,
"min": 4.434513568878174,
"max": 1375.7991943359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6919659895556314,
"min": 1.8687680251896381,
"max": 3.909068181915233,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2067.5009541511536,
"min": 231.72723512351513,
"max": 2180.600521981716,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6919659895556314,
"min": 1.8687680251896381,
"max": 3.909068181915233,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2067.5009541511536,
"min": 231.72723512351513,
"max": 2180.600521981716,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013155460536897104,
"min": 0.013155460536897104,
"max": 0.019599162857048213,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.026310921073794208,
"min": 0.026310921073794208,
"max": 0.05679058816022006,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06004755056152741,
"min": 0.02169024385511875,
"max": 0.06269465862876838,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.12009510112305483,
"min": 0.0433804877102375,
"max": 0.18808397588630515,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.555448481549996e-06,
"min": 4.555448481549996e-06,
"max": 0.00029532172655942496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.110896963099992e-06,
"min": 9.110896963099992e-06,
"max": 0.0008439366186878,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10151844999999998,
"min": 0.10151844999999998,
"max": 0.19844057499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20303689999999996,
"min": 0.20303689999999996,
"max": 0.5813122000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.577065499999997e-05,
"min": 8.577065499999997e-05,
"max": 0.0049221846925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017154130999999994,
"min": 0.00017154130999999994,
"max": 0.01406747878,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693335148",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693337628"
},
"total": 2479.666604753,
"count": 1,
"self": 0.7109321489997456,
"children": {
"run_training.setup": {
"total": 0.04259979299990846,
"count": 1,
"self": 0.04259979299990846
},
"TrainerController.start_learning": {
"total": 2478.913072811,
"count": 1,
"self": 4.405699163959071,
"children": {
"TrainerController._reset_env": {
"total": 4.35659011000007,
"count": 1,
"self": 4.35659011000007
},
"TrainerController.advance": {
"total": 2469.9622283650406,
"count": 231854,
"self": 4.412735680009064,
"children": {
"env_step": {
"total": 1901.4698940549665,
"count": 231854,
"self": 1605.5099666238443,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.0560828571462,
"count": 231854,
"self": 16.80319258809459,
"children": {
"TorchPolicy.evaluate": {
"total": 276.2528902690516,
"count": 222950,
"self": 276.2528902690516
}
}
},
"workers": {
"total": 2.9038445739759027,
"count": 231854,
"self": 0.0,
"children": {
"worker_root": {
"total": 2471.400189814978,
"count": 231854,
"is_parallel": true,
"self": 1159.107954207036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001229309999985162,
"count": 1,
"is_parallel": true,
"self": 0.00031697200006419735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009123379999209646,
"count": 2,
"is_parallel": true,
"self": 0.0009123379999209646
}
}
},
"UnityEnvironment.step": {
"total": 0.04795201900003576,
"count": 1,
"is_parallel": true,
"self": 0.00037208900005225587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002055549999795403,
"count": 1,
"is_parallel": true,
"self": 0.0002055549999795403
},
"communicator.exchange": {
"total": 0.04659944999991694,
"count": 1,
"is_parallel": true,
"self": 0.04659944999991694
},
"steps_from_proto": {
"total": 0.0007749250000870234,
"count": 1,
"is_parallel": true,
"self": 0.00020172400013507286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005732009999519505,
"count": 2,
"is_parallel": true,
"self": 0.0005732009999519505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1312.292235607942,
"count": 231853,
"is_parallel": true,
"self": 40.061854351981765,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.26023007710137,
"count": 231853,
"is_parallel": true,
"self": 82.26023007710137
},
"communicator.exchange": {
"total": 1091.2479880079616,
"count": 231853,
"is_parallel": true,
"self": 1091.2479880079616
},
"steps_from_proto": {
"total": 98.72216317089726,
"count": 231853,
"is_parallel": true,
"self": 34.59987522294614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.12228794795112,
"count": 463706,
"is_parallel": true,
"self": 64.12228794795112
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 564.0795986300652,
"count": 231854,
"self": 6.746704230161754,
"children": {
"process_trajectory": {
"total": 138.73459480290217,
"count": 231854,
"self": 137.1854454659025,
"children": {
"RLTrainer._checkpoint": {
"total": 1.549149336999676,
"count": 10,
"self": 1.549149336999676
}
}
},
"_update_policy": {
"total": 418.5982995970013,
"count": 96,
"self": 357.0812161150093,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.51708348199202,
"count": 2880,
"self": 61.51708348199202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.151000105892308e-06,
"count": 1,
"self": 1.151000105892308e-06
},
"TrainerController._save_models": {
"total": 0.18855402100007268,
"count": 1,
"self": 0.0027178440000170667,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1858361770000556,
"count": 1,
"self": 0.1858361770000556
}
}
}
}
}
}
}