{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4053606986999512,
"min": 1.4053606986999512,
"max": 1.4255105257034302,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70509.7578125,
"min": 68182.71875,
"max": 78667.828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 108.4451754385965,
"min": 76.31684698608964,
"max": 381.47727272727275,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49451.0,
"min": 48847.0,
"max": 50355.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999897.0,
"min": 49849.0,
"max": 1999897.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999897.0,
"min": 49849.0,
"max": 1999897.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3696491718292236,
"min": 0.08209816366434097,
"max": 2.4646270275115967,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1080.56005859375,
"min": 10.754859924316406,
"max": 1561.366455078125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5328398336443985,
"min": 1.695553164445717,
"max": 3.974897479330461,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1610.9749641418457,
"min": 222.11746454238892,
"max": 2455.429851949215,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5328398336443985,
"min": 1.695553164445717,
"max": 3.974897479330461,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1610.9749641418457,
"min": 222.11746454238892,
"max": 2455.429851949215,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01794922594999662,
"min": 0.013998484487011511,
"max": 0.021039786383820078,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05384767784998985,
"min": 0.03081378785476166,
"max": 0.06311935915146023,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049491981334156464,
"min": 0.021841629253079496,
"max": 0.05739608456691106,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14847594400246938,
"min": 0.04368325850615899,
"max": 0.1710285450021426,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.286298904600007e-06,
"min": 3.286298904600007e-06,
"max": 0.00029532202655932493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.85889671380002e-06,
"min": 9.85889671380002e-06,
"max": 0.0008439747186750998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1010954,
"min": 0.1010954,
"max": 0.198440675,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032862,
"min": 0.20736904999999994,
"max": 0.5813249,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.466046000000011e-05,
"min": 6.466046000000011e-05,
"max": 0.0049221896825,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019398138000000032,
"min": 0.00019398138000000032,
"max": 0.014068112510000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680970184",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680972615"
},
"total": 2430.9197570300003,
"count": 1,
"self": 0.7413433420001638,
"children": {
"run_training.setup": {
"total": 0.11074354699997002,
"count": 1,
"self": 0.11074354699997002
},
"TrainerController.start_learning": {
"total": 2430.0676701410002,
"count": 1,
"self": 4.4772201469586435,
"children": {
"TrainerController._reset_env": {
"total": 3.9968345260000433,
"count": 1,
"self": 3.9968345260000433
},
"TrainerController.advance": {
"total": 2421.3920585740416,
"count": 232593,
"self": 4.941469256021264,
"children": {
"env_step": {
"total": 1904.4902522670131,
"count": 232593,
"self": 1614.796305447047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 286.70980720705234,
"count": 232593,
"self": 17.643615697113432,
"children": {
"TorchPolicy.evaluate": {
"total": 269.0661915099389,
"count": 223037,
"self": 269.0661915099389
}
}
},
"workers": {
"total": 2.9841396129137365,
"count": 232593,
"self": 0.0,
"children": {
"worker_root": {
"total": 2421.672096019928,
"count": 232593,
"is_parallel": true,
"self": 1102.6373573968544,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009648889999880339,
"count": 1,
"is_parallel": true,
"self": 0.00027333000002727204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006915589999607619,
"count": 2,
"is_parallel": true,
"self": 0.0006915589999607619
}
}
},
"UnityEnvironment.step": {
"total": 0.036444650000021284,
"count": 1,
"is_parallel": true,
"self": 0.00033982600018589437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024193799993099674,
"count": 1,
"is_parallel": true,
"self": 0.00024193799993099674
},
"communicator.exchange": {
"total": 0.03504486499991799,
"count": 1,
"is_parallel": true,
"self": 0.03504486499991799
},
"steps_from_proto": {
"total": 0.0008180209999864019,
"count": 1,
"is_parallel": true,
"self": 0.00022097399994436273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005970470000420391,
"count": 2,
"is_parallel": true,
"self": 0.0005970470000420391
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1319.0347386230737,
"count": 232592,
"is_parallel": true,
"self": 38.80159185099478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.45931436394528,
"count": 232592,
"is_parallel": true,
"self": 85.45931436394528
},
"communicator.exchange": {
"total": 1101.112656385104,
"count": 232592,
"is_parallel": true,
"self": 1101.112656385104
},
"steps_from_proto": {
"total": 93.66117602302961,
"count": 232592,
"is_parallel": true,
"self": 37.69708416409833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.96409185893128,
"count": 465184,
"is_parallel": true,
"self": 55.96409185893128
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 511.96033705100706,
"count": 232593,
"self": 6.811585958118826,
"children": {
"process_trajectory": {
"total": 139.3068606008859,
"count": 232593,
"self": 138.05018583688616,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2566747639997402,
"count": 8,
"self": 1.2566747639997402
}
}
},
"_update_policy": {
"total": 365.84189049200234,
"count": 97,
"self": 307.80789676399763,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.0339937280047,
"count": 2910,
"self": 58.0339937280047
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5410000742122065e-06,
"count": 1,
"self": 1.5410000742122065e-06
},
"TrainerController._save_models": {
"total": 0.20155535300000338,
"count": 1,
"self": 0.0031017239998618606,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19845362900014152,
"count": 1,
"self": 0.19845362900014152
}
}
}
}
}
}
}