{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4038997888565063,
"min": 1.4038997888565063,
"max": 1.424918293952942,
"count": 26
},
"Huggy.Policy.Entropy.sum": {
"value": 69838.3984375,
"min": 69433.0859375,
"max": 76239.21875,
"count": 26
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 71.21820809248555,
"min": 68.0621546961326,
"max": 420.45,
"count": 26
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49283.0,
"min": 49227.0,
"max": 50454.0,
"count": 26
},
"Huggy.Step.mean": {
"value": 1299950.0,
"min": 49902.0,
"max": 1299950.0,
"count": 26
},
"Huggy.Step.sum": {
"value": 1299950.0,
"min": 49902.0,
"max": 1299950.0,
"count": 26
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2132790088653564,
"min": -0.0050509232096374035,
"max": 2.2245519161224365,
"count": 26
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1531.589111328125,
"min": -0.6010598540306091,
"max": 1581.6563720703125,
"count": 26
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 4.016878309019039,
"min": 1.7115899226745637,
"max": 4.0645961599804785,
"count": 26
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2779.679789841175,
"min": 203.6792007982731,
"max": 2858.101459980011,
"count": 26
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 4.016878309019039,
"min": 1.7115899226745637,
"max": 4.0645961599804785,
"count": 26
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2779.679789841175,
"min": 203.6792007982731,
"max": 2858.101459980011,
"count": 26
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01818725565309352,
"min": 0.013398074267994768,
"max": 0.021402372522546405,
"count": 26
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.054561766959280555,
"min": 0.026796148535989536,
"max": 0.05517984676674435,
"count": 26
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046650495049026276,
"min": 0.021238714829087256,
"max": 0.04905991993016667,
"count": 26
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13995148514707884,
"min": 0.04247742965817451,
"max": 0.14717975979050002,
"count": 26
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.00010884136371956666,
"min": 0.00010884136371956666,
"max": 0.00029536155154614996,
"count": 26
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00032652409115869997,
"min": 0.00023306652231119991,
"max": 0.0008444352185215998,
"count": 26
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.13628043333333334,
"min": 0.13628043333333334,
"max": 0.19845385000000001,
"count": 26
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.4088413,
"min": 0.27768880000000007,
"max": 0.5814784,
"count": 26
},
"Huggy.Policy.Beta.mean": {
"value": 0.0018203936233333335,
"min": 0.0018203936233333335,
"max": 0.004922847115,
"count": 26
},
"Huggy.Policy.Beta.sum": {
"value": 0.00546118087,
"min": 0.0038966711200000004,
"max": 0.014075772160000001,
"count": 26
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 26
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 26
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670528658",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670530796"
},
"total": 2138.3108221720004,
"count": 1,
"self": 0.26674200700017536,
"children": {
"run_training.setup": {
"total": 0.12684074300000248,
"count": 1,
"self": 0.12684074300000248
},
"TrainerController.start_learning": {
"total": 2137.917239422,
"count": 1,
"self": 4.547979873061649,
"children": {
"TrainerController._reset_env": {
"total": 11.157103425000003,
"count": 1,
"self": 11.157103425000003
},
"TrainerController.advance": {
"total": 2121.993839581938,
"count": 154886,
"self": 4.616334345897485,
"children": {
"env_step": {
"total": 1730.2653651370408,
"count": 154886,
"self": 1441.292321369046,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.74260372599457,
"count": 154886,
"self": 15.789832451023528,
"children": {
"TorchPolicy.evaluate": {
"total": 269.95277127497104,
"count": 148153,
"self": 70.06429841796637,
"children": {
"TorchPolicy.sample_actions": {
"total": 199.88847285700467,
"count": 148153,
"self": 199.88847285700467
}
}
}
}
},
"workers": {
"total": 3.230440042000282,
"count": 154885,
"self": 0.0,
"children": {
"worker_root": {
"total": 2129.5005112859862,
"count": 154885,
"is_parallel": true,
"self": 963.8439002089617,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018342470000334288,
"count": 1,
"is_parallel": true,
"self": 0.0003768180000633947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001457428999970034,
"count": 2,
"is_parallel": true,
"self": 0.001457428999970034
}
}
},
"UnityEnvironment.step": {
"total": 0.03187894400002733,
"count": 1,
"is_parallel": true,
"self": 0.00030034900004238807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020491199995831266,
"count": 1,
"is_parallel": true,
"self": 0.00020491199995831266
},
"communicator.exchange": {
"total": 0.030557170999998107,
"count": 1,
"is_parallel": true,
"self": 0.030557170999998107
},
"steps_from_proto": {
"total": 0.0008165120000285242,
"count": 1,
"is_parallel": true,
"self": 0.0002890360000264991,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005274760000020251,
"count": 2,
"is_parallel": true,
"self": 0.0005274760000020251
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1165.6566110770245,
"count": 154884,
"is_parallel": true,
"self": 32.519965384932675,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 68.38158529300762,
"count": 154884,
"is_parallel": true,
"self": 68.38158529300762
},
"communicator.exchange": {
"total": 977.8697659180075,
"count": 154884,
"is_parallel": true,
"self": 977.8697659180075
},
"steps_from_proto": {
"total": 86.88529448107664,
"count": 154884,
"is_parallel": true,
"self": 35.84984210203618,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.035452379040464,
"count": 309768,
"is_parallel": true,
"self": 51.035452379040464
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 387.11214009900004,
"count": 154885,
"self": 7.397319591023802,
"children": {
"process_trajectory": {
"total": 137.52867146497624,
"count": 154885,
"self": 137.2365648679763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.292106596999929,
"count": 2,
"self": 0.292106596999929
}
}
},
"_update_policy": {
"total": 242.186149043,
"count": 64,
"self": 201.8716260270042,
"children": {
"TorchPPOOptimizer.update": {
"total": 40.3145230159958,
"count": 1920,
"self": 40.3145230159958
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8020000425167382e-06,
"count": 1,
"self": 1.8020000425167382e-06
},
"TrainerController._save_models": {
"total": 0.2183147400000962,
"count": 1,
"self": 0.0031105380003282335,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21520420199976797,
"count": 1,
"self": 0.21520420199976797
}
}
}
}
}
}
}