{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4130795001983643,
"min": 1.4130795001983643,
"max": 1.4301551580429077,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70528.2109375,
"min": 68275.453125,
"max": 77250.1953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.94973070017953,
"min": 84.25724020442931,
"max": 388.1007751937984,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49545.0,
"min": 48919.0,
"max": 50065.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999976.0,
"min": 49445.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999976.0,
"min": 49445.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4726390838623047,
"min": 0.09838388115167618,
"max": 2.528179407119751,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1377.260009765625,
"min": 12.59313678741455,
"max": 1450.2562255859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.869561657550305,
"min": 1.842696403618902,
"max": 4.0352122837439515,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2155.34584325552,
"min": 235.86513966321945,
"max": 2299.816231548786,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.869561657550305,
"min": 1.842696403618902,
"max": 4.0352122837439515,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2155.34584325552,
"min": 235.86513966321945,
"max": 2299.816231548786,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019932418193881554,
"min": 0.013090171909425408,
"max": 0.020502616971498355,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05979725458164466,
"min": 0.026180343818850815,
"max": 0.05979725458164466,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05612211161189609,
"min": 0.020816151642551024,
"max": 0.05612211161189609,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16836633483568827,
"min": 0.04163230328510205,
"max": 0.16836633483568827,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.468398843900003e-06,
"min": 3.468398843900003e-06,
"max": 0.00029534940155019997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0405196531700009e-05,
"min": 1.0405196531700009e-05,
"max": 0.0008441842686052499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115610000000004,
"min": 0.10115610000000004,
"max": 0.19844980000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30346830000000014,
"min": 0.2074829,
"max": 0.5813947499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.768939000000004e-05,
"min": 6.768939000000004e-05,
"max": 0.004922645020000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020306817000000012,
"min": 0.00020306817000000012,
"max": 0.014071598024999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676732521",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676737369"
},
"total": 4848.056114968,
"count": 1,
"self": 0.6429651729995385,
"children": {
"run_training.setup": {
"total": 0.1511821779999991,
"count": 1,
"self": 0.1511821779999991
},
"TrainerController.start_learning": {
"total": 4847.261967617,
"count": 1,
"self": 9.20656827005132,
"children": {
"TrainerController._reset_env": {
"total": 7.5908657070000345,
"count": 1,
"self": 7.5908657070000345
},
"TrainerController.advance": {
"total": 4830.326530987948,
"count": 232227,
"self": 8.727051947141263,
"children": {
"env_step": {
"total": 3116.6399062969035,
"count": 232227,
"self": 2626.9073079440623,
"children": {
"SubprocessEnvManager._take_step": {
"total": 483.8102556459661,
"count": 232227,
"self": 29.427451074835915,
"children": {
"TorchPolicy.evaluate": {
"total": 454.3828045711302,
"count": 222941,
"self": 65.40981705814181,
"children": {
"TorchPolicy.sample_actions": {
"total": 388.97298751298837,
"count": 222941,
"self": 388.97298751298837
}
}
}
}
},
"workers": {
"total": 5.922342706874986,
"count": 232227,
"self": 0.0,
"children": {
"worker_root": {
"total": 4830.626043328077,
"count": 232227,
"is_parallel": true,
"self": 2724.121282414045,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029660460000400235,
"count": 1,
"is_parallel": true,
"self": 0.0007359930000347958,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022300530000052277,
"count": 2,
"is_parallel": true,
"self": 0.0022300530000052277
}
}
},
"UnityEnvironment.step": {
"total": 0.053647538999996414,
"count": 1,
"is_parallel": true,
"self": 0.000428968999983681,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003021530000069106,
"count": 1,
"is_parallel": true,
"self": 0.0003021530000069106
},
"communicator.exchange": {
"total": 0.051862497000001895,
"count": 1,
"is_parallel": true,
"self": 0.051862497000001895
},
"steps_from_proto": {
"total": 0.0010539200000039273,
"count": 1,
"is_parallel": true,
"self": 0.0003697939999938171,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006841260000101101,
"count": 2,
"is_parallel": true,
"self": 0.0006841260000101101
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2106.504760914032,
"count": 232226,
"is_parallel": true,
"self": 65.12886131303003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 92.33343831199147,
"count": 232226,
"is_parallel": true,
"self": 92.33343831199147
},
"communicator.exchange": {
"total": 1790.0914907761037,
"count": 232226,
"is_parallel": true,
"self": 1790.0914907761037
},
"steps_from_proto": {
"total": 158.95097051290668,
"count": 232226,
"is_parallel": true,
"self": 58.59085525584277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.36011525706391,
"count": 464452,
"is_parallel": true,
"self": 100.36011525706391
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1704.9595727439032,
"count": 232227,
"self": 14.76242686904402,
"children": {
"process_trajectory": {
"total": 282.30792479686284,
"count": 232227,
"self": 280.87939167486275,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4285331220000899,
"count": 10,
"self": 1.4285331220000899
}
}
},
"_update_policy": {
"total": 1407.8892210779964,
"count": 97,
"self": 373.9105887719852,
"children": {
"TorchPPOOptimizer.update": {
"total": 1033.9786323060111,
"count": 2910,
"self": 1033.9786323060111
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.099999619531445e-06,
"count": 1,
"self": 2.099999619531445e-06
},
"TrainerController._save_models": {
"total": 0.1380005520004488,
"count": 1,
"self": 0.003092623000156891,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13490792900029192,
"count": 1,
"self": 0.13490792900029192
}
}
}
}
}
}
}