{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405936598777771,
"min": 1.405936598777771,
"max": 1.4279758930206299,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71030.7265625,
"min": 68971.984375,
"max": 78260.578125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.29821073558648,
"min": 84.54358974358975,
"max": 380.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49444.0,
"min": 48894.0,
"max": 50259.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999851.0,
"min": 49804.0,
"max": 1999851.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999851.0,
"min": 49804.0,
"max": 1999851.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4140377044677734,
"min": 0.09922930598258972,
"max": 2.450228214263916,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1214.260986328125,
"min": 12.999038696289062,
"max": 1388.60498046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7791157683605703,
"min": 1.801224200780155,
"max": 3.9519849332896144,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1900.8952314853668,
"min": 235.96037030220032,
"max": 2185.357919573784,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7791157683605703,
"min": 1.801224200780155,
"max": 3.9519849332896144,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1900.8952314853668,
"min": 235.96037030220032,
"max": 2185.357919573784,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016025023964498866,
"min": 0.01403059961157851,
"max": 0.020063904911431663,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04807507189349659,
"min": 0.029119760141475125,
"max": 0.06019171473429499,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.051660810328192176,
"min": 0.021314411672453085,
"max": 0.060358540154993534,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15498243098457654,
"min": 0.04262882334490617,
"max": 0.17280518623689808,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4222988592666634e-06,
"min": 3.4222988592666634e-06,
"max": 0.0002953296015568,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.026689657779999e-05,
"min": 1.026689657779999e-05,
"max": 0.0008439930186689999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114073333333333,
"min": 0.10114073333333333,
"max": 0.1984432,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034222,
"min": 0.20741845000000003,
"max": 0.5813309999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.692259333333328e-05,
"min": 6.692259333333328e-05,
"max": 0.004922315679999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020076777999999984,
"min": 0.00020076777999999984,
"max": 0.0140684169,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672235594",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672237797"
},
"total": 2202.7331866910004,
"count": 1,
"self": 0.3890647180000997,
"children": {
"run_training.setup": {
"total": 0.3105755469999849,
"count": 1,
"self": 0.3105755469999849
},
"TrainerController.start_learning": {
"total": 2202.033546426,
"count": 1,
"self": 3.8225638899771184,
"children": {
"TrainerController._reset_env": {
"total": 8.459978647000014,
"count": 1,
"self": 8.459978647000014
},
"TrainerController.advance": {
"total": 2189.6274729300226,
"count": 232080,
"self": 3.945035824858678,
"children": {
"env_step": {
"total": 1720.2220150721014,
"count": 232080,
"self": 1445.7185618431317,
"children": {
"SubprocessEnvManager._take_step": {
"total": 272.01081711299616,
"count": 232080,
"self": 13.923824901010278,
"children": {
"TorchPolicy.evaluate": {
"total": 258.0869922119859,
"count": 222984,
"self": 65.1004741589507,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.98651805303518,
"count": 222984,
"self": 192.98651805303518
}
}
}
}
},
"workers": {
"total": 2.4926361159734256,
"count": 232080,
"self": 0.0,
"children": {
"worker_root": {
"total": 2194.558658069002,
"count": 232080,
"is_parallel": true,
"self": 1002.3634242570349,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002217052999981206,
"count": 1,
"is_parallel": true,
"self": 0.0003292279999982384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018878249999829677,
"count": 2,
"is_parallel": true,
"self": 0.0018878249999829677
}
}
},
"UnityEnvironment.step": {
"total": 0.030811835000008614,
"count": 1,
"is_parallel": true,
"self": 0.00030102400000942,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001900440000213166,
"count": 1,
"is_parallel": true,
"self": 0.0001900440000213166
},
"communicator.exchange": {
"total": 0.02948705199997903,
"count": 1,
"is_parallel": true,
"self": 0.02948705199997903
},
"steps_from_proto": {
"total": 0.0008337149999988469,
"count": 1,
"is_parallel": true,
"self": 0.00027824199992210197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005554730000767449,
"count": 2,
"is_parallel": true,
"self": 0.0005554730000767449
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1192.195233811967,
"count": 232079,
"is_parallel": true,
"self": 34.13120158597576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.19968911098834,
"count": 232079,
"is_parallel": true,
"self": 75.19968911098834
},
"communicator.exchange": {
"total": 990.5227388709634,
"count": 232079,
"is_parallel": true,
"self": 990.5227388709634
},
"steps_from_proto": {
"total": 92.34160424403939,
"count": 232079,
"is_parallel": true,
"self": 37.67274472504823,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.668859518991155,
"count": 464158,
"is_parallel": true,
"self": 54.668859518991155
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.46042203306257,
"count": 232080,
"self": 5.830089984954043,
"children": {
"process_trajectory": {
"total": 142.4577582701089,
"count": 232080,
"self": 141.28144319310883,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1763150770000834,
"count": 10,
"self": 1.1763150770000834
}
}
},
"_update_policy": {
"total": 317.1725737779996,
"count": 97,
"self": 264.3151105120001,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.85746326599951,
"count": 2910,
"self": 52.85746326599951
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.73000169324223e-07,
"count": 1,
"self": 9.73000169324223e-07
},
"TrainerController._save_models": {
"total": 0.12352998600044884,
"count": 1,
"self": 0.001962277000075119,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12156770900037372,
"count": 1,
"self": 0.12156770900037372
}
}
}
}
}
}
}