{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4009262323379517,
"min": 1.4009262323379517,
"max": 1.427578330039978,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68886.34375,
"min": 68202.59375,
"max": 76983.4609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.17857142857143,
"min": 79.7373572593801,
"max": 396.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49390.0,
"min": 48879.0,
"max": 50292.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999904.0,
"min": 49925.0,
"max": 1999904.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999904.0,
"min": 49925.0,
"max": 1999904.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.436591148376465,
"min": 0.12015644460916519,
"max": 2.4944279193878174,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1500.940185546875,
"min": 15.1397123336792,
"max": 1536.567626953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7045986868150824,
"min": 1.7023673848736853,
"max": 3.9520273820930987,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2282.0327910780907,
"min": 214.49829049408436,
"max": 2423.221585690975,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7045986868150824,
"min": 1.7023673848736853,
"max": 3.9520273820930987,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2282.0327910780907,
"min": 214.49829049408436,
"max": 2423.221585690975,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016279737923953996,
"min": 0.013769613095306948,
"max": 0.019777205047709384,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04883921377186198,
"min": 0.028829436399488866,
"max": 0.05677547647501342,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054274493952592213,
"min": 0.021654841986795265,
"max": 0.058442500771747695,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16282348185777665,
"min": 0.04330968397359053,
"max": 0.17532750231524308,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.810748729783326e-06,
"min": 3.810748729783326e-06,
"max": 0.0002953338015554,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1432246189349978e-05,
"min": 1.1432246189349978e-05,
"max": 0.0008438100187299999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127021666666665,
"min": 0.10127021666666665,
"max": 0.19844459999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30381064999999996,
"min": 0.20766645,
"max": 0.5812699999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.338381166666656e-05,
"min": 7.338381166666656e-05,
"max": 0.004922385540000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022015143499999967,
"min": 0.00022015143499999967,
"max": 0.014065372999999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670865621",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670867768"
},
"total": 2146.5872289830004,
"count": 1,
"self": 0.3891846289998284,
"children": {
"run_training.setup": {
"total": 0.10626759600006608,
"count": 1,
"self": 0.10626759600006608
},
"TrainerController.start_learning": {
"total": 2146.0917767580004,
"count": 1,
"self": 3.784405380997214,
"children": {
"TrainerController._reset_env": {
"total": 9.850729608000051,
"count": 1,
"self": 9.850729608000051
},
"TrainerController.advance": {
"total": 2132.3446635980026,
"count": 232638,
"self": 4.051167041028748,
"children": {
"env_step": {
"total": 1672.1709022290472,
"count": 232638,
"self": 1407.7497174110922,
"children": {
"SubprocessEnvManager._take_step": {
"total": 261.9530824339596,
"count": 232638,
"self": 13.96712073099377,
"children": {
"TorchPolicy.evaluate": {
"total": 247.98596170296582,
"count": 222894,
"self": 62.64238851592131,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.3435731870445,
"count": 222894,
"self": 185.3435731870445
}
}
}
}
},
"workers": {
"total": 2.4681023839953014,
"count": 232638,
"self": 0.0,
"children": {
"worker_root": {
"total": 2138.56698010401,
"count": 232638,
"is_parallel": true,
"self": 977.0989391269916,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001830715999972199,
"count": 1,
"is_parallel": true,
"self": 0.0002993440000409464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015313719999312525,
"count": 2,
"is_parallel": true,
"self": 0.0015313719999312525
}
}
},
"UnityEnvironment.step": {
"total": 0.02671823800005768,
"count": 1,
"is_parallel": true,
"self": 0.0002705320000586653,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022764599998481572,
"count": 1,
"is_parallel": true,
"self": 0.00022764599998481572
},
"communicator.exchange": {
"total": 0.025542299999983697,
"count": 1,
"is_parallel": true,
"self": 0.025542299999983697
},
"steps_from_proto": {
"total": 0.0006777600000305029,
"count": 1,
"is_parallel": true,
"self": 0.0002265470000111236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045121300001937925,
"count": 2,
"is_parallel": true,
"self": 0.00045121300001937925
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1161.4680409770183,
"count": 232637,
"is_parallel": true,
"self": 33.83570332513068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.46047425795575,
"count": 232637,
"is_parallel": true,
"self": 74.46047425795575
},
"communicator.exchange": {
"total": 963.4713108729394,
"count": 232637,
"is_parallel": true,
"self": 963.4713108729394
},
"steps_from_proto": {
"total": 89.70055252099257,
"count": 232637,
"is_parallel": true,
"self": 36.864444117923426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.83610840306915,
"count": 465274,
"is_parallel": true,
"self": 52.83610840306915
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.12259432792644,
"count": 232638,
"self": 5.60258689691932,
"children": {
"process_trajectory": {
"total": 143.56875733800598,
"count": 232638,
"self": 143.10894722900616,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4598101089998181,
"count": 4,
"self": 0.4598101089998181
}
}
},
"_update_policy": {
"total": 306.95125009300114,
"count": 97,
"self": 254.85160683200036,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.09964326100078,
"count": 2910,
"self": 52.09964326100078
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.55000359681435e-07,
"count": 1,
"self": 9.55000359681435e-07
},
"TrainerController._save_models": {
"total": 0.11197721600001387,
"count": 1,
"self": 0.0020785170004273823,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10989869899958649,
"count": 1,
"self": 0.10989869899958649
}
}
}
}
}
}
}