ppo-Huggy / run_logs /timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4032707214355469,
"min": 1.403261661529541,
"max": 1.4250622987747192,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69739.75,
"min": 68135.9296875,
"max": 75576.1796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.05098039215686,
"min": 83.78983050847458,
"max": 383.82442748091603,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49496.0,
"min": 48896.0,
"max": 50281.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999940.0,
"min": 49668.0,
"max": 1999940.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999940.0,
"min": 49668.0,
"max": 1999940.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3962836265563965,
"min": -0.0017804146045818925,
"max": 2.472660779953003,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1222.1046142578125,
"min": -0.23145389556884766,
"max": 1403.837646484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.683290399640214,
"min": 1.846735902932974,
"max": 3.983600033558299,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1878.4781038165092,
"min": 240.07566738128662,
"max": 2243.007387459278,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.683290399640214,
"min": 1.846735902932974,
"max": 3.983600033558299,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1878.4781038165092,
"min": 240.07566738128662,
"max": 2243.007387459278,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017758189081602416,
"min": 0.01298720244303695,
"max": 0.021571641539533934,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05327456724480725,
"min": 0.0259744048860739,
"max": 0.056440781962980205,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05415238278607528,
"min": 0.0230345852052172,
"max": 0.059517034267385804,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16245714835822583,
"min": 0.0460691704104344,
"max": 0.1785511028021574,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5919488027166645e-06,
"min": 3.5919488027166645e-06,
"max": 0.0002953446015518,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0775846408149993e-05,
"min": 1.0775846408149993e-05,
"max": 0.0008443086185637999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119728333333335,
"min": 0.10119728333333335,
"max": 0.19844819999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30359185000000005,
"min": 0.20757054999999996,
"max": 0.5814362,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.974443833333327e-05,
"min": 6.974443833333327e-05,
"max": 0.00492256518,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002092333149999998,
"min": 0.0002092333149999998,
"max": 0.014073666379999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679020137",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679022499"
},
"total": 2362.33558904,
"count": 1,
"self": 0.4392089019997911,
"children": {
"run_training.setup": {
"total": 0.11315042899997252,
"count": 1,
"self": 0.11315042899997252
},
"TrainerController.start_learning": {
"total": 2361.783229709,
"count": 1,
"self": 4.182974051008387,
"children": {
"TrainerController._reset_env": {
"total": 9.781671828000015,
"count": 1,
"self": 9.781671828000015
},
"TrainerController.advance": {
"total": 2347.698180225991,
"count": 232020,
"self": 4.668575520038303,
"children": {
"env_step": {
"total": 1829.4950249829658,
"count": 232020,
"self": 1546.3367647738837,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.3233146800009,
"count": 232020,
"self": 16.497791815911796,
"children": {
"TorchPolicy.evaluate": {
"total": 263.8255228640891,
"count": 222960,
"self": 263.8255228640891
}
}
},
"workers": {
"total": 2.8349455290812102,
"count": 232020,
"self": 0.0,
"children": {
"worker_root": {
"total": 2353.865021377006,
"count": 232020,
"is_parallel": true,
"self": 1094.658316326136,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001040264999971896,
"count": 1,
"is_parallel": true,
"self": 0.0002628359999903296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007774289999815664,
"count": 2,
"is_parallel": true,
"self": 0.0007774289999815664
}
}
},
"UnityEnvironment.step": {
"total": 0.028637280999987524,
"count": 1,
"is_parallel": true,
"self": 0.00035992200002965546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021906599999965692,
"count": 1,
"is_parallel": true,
"self": 0.00021906599999965692
},
"communicator.exchange": {
"total": 0.02726431799999318,
"count": 1,
"is_parallel": true,
"self": 0.02726431799999318
},
"steps_from_proto": {
"total": 0.0007939749999650303,
"count": 1,
"is_parallel": true,
"self": 0.000260447999949065,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005335270000159653,
"count": 2,
"is_parallel": true,
"self": 0.0005335270000159653
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1259.2067050508701,
"count": 232019,
"is_parallel": true,
"self": 39.022764335852116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.30726621603156,
"count": 232019,
"is_parallel": true,
"self": 77.30726621603156
},
"communicator.exchange": {
"total": 1053.7944533869336,
"count": 232019,
"is_parallel": true,
"self": 1053.7944533869336
},
"steps_from_proto": {
"total": 89.08222111205276,
"count": 232019,
"is_parallel": true,
"self": 33.61858931192165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.46363180013111,
"count": 464038,
"is_parallel": true,
"self": 55.46363180013111
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 513.5345797229868,
"count": 232020,
"self": 6.8641411029967685,
"children": {
"process_trajectory": {
"total": 140.18766184399078,
"count": 232020,
"self": 138.96617837199017,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2214834720006138,
"count": 10,
"self": 1.2214834720006138
}
}
},
"_update_policy": {
"total": 366.48277677599924,
"count": 97,
"self": 307.8114412959957,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.67133548000356,
"count": 2910,
"self": 58.67133548000356
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.120003596763127e-07,
"count": 1,
"self": 9.120003596763127e-07
},
"TrainerController._save_models": {
"total": 0.12040269199997056,
"count": 1,
"self": 0.002755480999894644,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11764721100007591,
"count": 1,
"self": 0.11764721100007591
}
}
}
}
}
}
}