{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4024274349212646,
"min": 1.4024274349212646,
"max": 1.4265638589859009,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70153.625,
"min": 68620.1796875,
"max": 77284.828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.60195758564437,
"min": 77.46153846153847,
"max": 392.74015748031496,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49409.0,
"min": 49096.0,
"max": 50433.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999927.0,
"min": 49988.0,
"max": 1999927.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999927.0,
"min": 49988.0,
"max": 1999927.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.508275270462036,
"min": 0.12733498215675354,
"max": 2.508275270462036,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1537.57275390625,
"min": 16.044208526611328,
"max": 1556.33642578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9064133249916027,
"min": 1.6908568903094245,
"max": 3.9869881114224808,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2394.6313682198524,
"min": 213.0479681789875,
"max": 2467.919152677059,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9064133249916027,
"min": 1.6908568903094245,
"max": 3.9869881114224808,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2394.6313682198524,
"min": 213.0479681789875,
"max": 2467.919152677059,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017141430794840238,
"min": 0.013304772768363667,
"max": 0.019576360335728772,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05142429238452072,
"min": 0.026609545536727335,
"max": 0.05697070937846244,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05309478297001785,
"min": 0.020414066532005865,
"max": 0.06050681496659915,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15928434891005355,
"min": 0.04082813306401173,
"max": 0.17948120025297004,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5693988102333307e-06,
"min": 3.5693988102333307e-06,
"max": 0.000295276426574525,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0708196430699992e-05,
"min": 1.0708196430699992e-05,
"max": 0.0008438037187320998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118976666666667,
"min": 0.10118976666666667,
"max": 0.19842547499999993,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035693,
"min": 0.2075163,
"max": 0.5812679,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.936935666666663e-05,
"min": 6.936935666666663e-05,
"max": 0.004921431202500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020810806999999986,
"min": 0.00020810806999999986,
"max": 0.014065268210000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671629782",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671631961"
},
"total": 2178.659919618,
"count": 1,
"self": 0.3871541320004326,
"children": {
"run_training.setup": {
"total": 0.11421816000000717,
"count": 1,
"self": 0.11421816000000717
},
"TrainerController.start_learning": {
"total": 2178.1585473259997,
"count": 1,
"self": 3.584698171835498,
"children": {
"TrainerController._reset_env": {
"total": 8.356933104999996,
"count": 1,
"self": 8.356933104999996
},
"TrainerController.advance": {
"total": 2166.104016406164,
"count": 233026,
"self": 3.832270423119553,
"children": {
"env_step": {
"total": 1699.5573915460309,
"count": 233026,
"self": 1431.081252985044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.0312287159685,
"count": 233026,
"self": 13.68852785801812,
"children": {
"TorchPolicy.evaluate": {
"total": 252.34270085795038,
"count": 222924,
"self": 63.39850466797577,
"children": {
"TorchPolicy.sample_actions": {
"total": 188.9441961899746,
"count": 222924,
"self": 188.9441961899746
}
}
}
}
},
"workers": {
"total": 2.4449098450181737,
"count": 233026,
"self": 0.0,
"children": {
"worker_root": {
"total": 2170.42952545696,
"count": 233026,
"is_parallel": true,
"self": 991.3190265461294,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002150382999957401,
"count": 1,
"is_parallel": true,
"self": 0.0003206289999866385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018297539999707624,
"count": 2,
"is_parallel": true,
"self": 0.0018297539999707624
}
}
},
"UnityEnvironment.step": {
"total": 0.02641098199995895,
"count": 1,
"is_parallel": true,
"self": 0.0003062109998950291,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016923700002280384,
"count": 1,
"is_parallel": true,
"self": 0.00016923700002280384
},
"communicator.exchange": {
"total": 0.025235633000022517,
"count": 1,
"is_parallel": true,
"self": 0.025235633000022517
},
"steps_from_proto": {
"total": 0.000699901000018599,
"count": 1,
"is_parallel": true,
"self": 0.00022644900002433133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004734519999942677,
"count": 2,
"is_parallel": true,
"self": 0.0004734519999942677
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1179.1104989108308,
"count": 233025,
"is_parallel": true,
"self": 33.967504459721795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.30542899799491,
"count": 233025,
"is_parallel": true,
"self": 74.30542899799491
},
"communicator.exchange": {
"total": 980.1383388090777,
"count": 233025,
"is_parallel": true,
"self": 980.1383388090777
},
"steps_from_proto": {
"total": 90.6992266440364,
"count": 233025,
"is_parallel": true,
"self": 37.218529018138156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.480697625898244,
"count": 466050,
"is_parallel": true,
"self": 53.480697625898244
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.7143544370137,
"count": 233026,
"self": 6.037760518985408,
"children": {
"process_trajectory": {
"total": 147.60064143103006,
"count": 233026,
"self": 146.44513149403036,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1555099369996924,
"count": 10,
"self": 1.1555099369996924
}
}
},
"_update_policy": {
"total": 309.07595248699823,
"count": 97,
"self": 256.3538094319913,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.72214305500694,
"count": 2910,
"self": 52.72214305500694
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.110003702517133e-07,
"count": 1,
"self": 9.110003702517133e-07
},
"TrainerController._save_models": {
"total": 0.11289873199984868,
"count": 1,
"self": 0.001901040999655379,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1109976910001933,
"count": 1,
"self": 0.1109976910001933
}
}
}
}
}
}
}