ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4063880443572998,
"min": 1.4063880443572998,
"max": 1.4280123710632324,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69022.7109375,
"min": 67943.3671875,
"max": 77700.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.20242914979757,
"min": 80.63071895424837,
"max": 397.88188976377955,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49500.0,
"min": 48805.0,
"max": 50531.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49995.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49995.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4055700302124023,
"min": 0.2757219970226288,
"max": 2.4262635707855225,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1188.3515625,
"min": 34.740970611572266,
"max": 1450.77685546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6594449057270158,
"min": 1.7627363808098293,
"max": 3.9517849013460205,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1807.7657834291458,
"min": 222.1047839820385,
"max": 2347.360231399536,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6594449057270158,
"min": 1.7627363808098293,
"max": 3.9517849013460205,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1807.7657834291458,
"min": 222.1047839820385,
"max": 2347.360231399536,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014390847869253773,
"min": 0.013039905866086578,
"max": 0.021826041363480424,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04317254360776132,
"min": 0.026079811732173157,
"max": 0.05784549708090102,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04929005168378353,
"min": 0.019915925959746044,
"max": 0.05241654569076167,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1478701550513506,
"min": 0.03983185191949209,
"max": 0.157249637072285,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3453988848999936e-06,
"min": 3.3453988848999936e-06,
"max": 0.0002953446015517999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0036196654699981e-05,
"min": 1.0036196654699981e-05,
"max": 0.0008439820686726499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111510000000001,
"min": 0.10111510000000001,
"max": 0.19844820000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30334530000000004,
"min": 0.20738374999999998,
"max": 0.5813273499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.564348999999993e-05,
"min": 6.564348999999993e-05,
"max": 0.0049225651799999996,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001969304699999998,
"min": 0.0001969304699999998,
"max": 0.014068234764999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711363064",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711365628"
},
"total": 2564.6766432020004,
"count": 1,
"self": 0.4430240240008061,
"children": {
"run_training.setup": {
"total": 0.0822040969999307,
"count": 1,
"self": 0.0822040969999307
},
"TrainerController.start_learning": {
"total": 2564.151415081,
"count": 1,
"self": 4.946883093024553,
"children": {
"TrainerController._reset_env": {
"total": 2.981782206999924,
"count": 1,
"self": 2.981782206999924
},
"TrainerController.advance": {
"total": 2556.1058000039757,
"count": 232117,
"self": 5.059691178963931,
"children": {
"env_step": {
"total": 2059.8231920990943,
"count": 232117,
"self": 1707.640693808074,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.88583408603495,
"count": 232117,
"self": 18.639614157988944,
"children": {
"TorchPolicy.evaluate": {
"total": 330.246219928046,
"count": 222978,
"self": 330.246219928046
}
}
},
"workers": {
"total": 3.296664204985177,
"count": 232117,
"self": 0.0,
"children": {
"worker_root": {
"total": 2556.511436407847,
"count": 232117,
"is_parallel": true,
"self": 1181.9643697598767,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009663810000120066,
"count": 1,
"is_parallel": true,
"self": 0.00028515100007098226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006812299999410243,
"count": 2,
"is_parallel": true,
"self": 0.0006812299999410243
}
}
},
"UnityEnvironment.step": {
"total": 0.03140544100006082,
"count": 1,
"is_parallel": true,
"self": 0.0003695720000678193,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002136030000201572,
"count": 1,
"is_parallel": true,
"self": 0.0002136030000201572
},
"communicator.exchange": {
"total": 0.030053416999976434,
"count": 1,
"is_parallel": true,
"self": 0.030053416999976434
},
"steps_from_proto": {
"total": 0.0007688489999964077,
"count": 1,
"is_parallel": true,
"self": 0.00021853500004453963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000550313999951868,
"count": 2,
"is_parallel": true,
"self": 0.000550313999951868
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1374.5470666479705,
"count": 232116,
"is_parallel": true,
"self": 41.32263772495071,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 92.16361531995847,
"count": 232116,
"is_parallel": true,
"self": 92.16361531995847
},
"communicator.exchange": {
"total": 1141.930236330079,
"count": 232116,
"is_parallel": true,
"self": 1141.930236330079
},
"steps_from_proto": {
"total": 99.13057727298258,
"count": 232116,
"is_parallel": true,
"self": 37.95601494095331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.17456233202927,
"count": 464232,
"is_parallel": true,
"self": 61.17456233202927
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 491.22291672591734,
"count": 232117,
"self": 7.437410417919182,
"children": {
"process_trajectory": {
"total": 164.8910756569976,
"count": 232117,
"self": 163.61179836599672,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2792772910008807,
"count": 10,
"self": 1.2792772910008807
}
}
},
"_update_policy": {
"total": 318.89443065100056,
"count": 97,
"self": 255.0816976920055,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.81273295899507,
"count": 2910,
"self": 63.81273295899507
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.809999260643963e-07,
"count": 1,
"self": 7.809999260643963e-07
},
"TrainerController._save_models": {
"total": 0.1169489959997918,
"count": 1,
"self": 0.0019708089994310285,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11497818700036078,
"count": 1,
"self": 0.11497818700036078
}
}
}
}
}
}
}
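
A minimal sketch of how a timers.json like the one above might be inspected offline, assuming it has been saved locally as run_logs/timers.json; the key names used below ("gauges", "metadata", "children", ...) mirror the structure shown in this file rather than any mlagents API.

import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: per-metric value/min/max/count accumulated over the run.
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, "
      f"count {reward['count']})")

# Metadata: library versions and the exact mlagents-learn command line.
print("trained with:", timers["metadata"]["command_line_arguments"])

# Timer tree: wall-clock seconds spent in each block, with nested children.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0):.1f}s "
          f"(count {node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)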