{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4126367568969727,
"min": 1.4126367568969727,
"max": 1.4285238981246948,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71688.4921875,
"min": 69322.8203125,
"max": 77158.6953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.65523156089193,
"min": 78.61980830670926,
"max": 387.06976744186045,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49354.0,
"min": 49216.0,
"max": 50265.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49597.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49597.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4434092044830322,
"min": 0.21269938349723816,
"max": 2.521256923675537,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1424.507568359375,
"min": 27.225521087646484,
"max": 1504.262451171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7342248518102985,
"min": 1.7168561443686485,
"max": 3.972532258503932,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2177.053088605404,
"min": 219.757586479187,
"max": 2308.5028245449066,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7342248518102985,
"min": 1.7168561443686485,
"max": 3.972532258503932,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2177.053088605404,
"min": 219.757586479187,
"max": 2308.5028245449066,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016184454681853016,
"min": 0.013784410608786856,
"max": 0.020395406239549628,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048553364045559044,
"min": 0.027568821217573713,
"max": 0.058075285031615444,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05653673422833284,
"min": 0.021454034031679232,
"max": 0.06054357290267944,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16961020268499852,
"min": 0.042908068063358465,
"max": 0.16961020268499852,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.911498696199998e-06,
"min": 3.911498696199998e-06,
"max": 0.00029535795154735,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1734496088599994e-05,
"min": 1.1734496088599994e-05,
"max": 0.0008444055185315,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1013038,
"min": 0.1013038,
"max": 0.19845265000000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039114,
"min": 0.20773755,
"max": 0.5814684999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.505961999999999e-05,
"min": 7.505961999999999e-05,
"max": 0.004922787235,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022517885999999998,
"min": 0.00022517885999999998,
"max": 0.014075278150000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681737148",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681739623"
},
"total": 2474.6632489350004,
"count": 1,
"self": 0.8860459800007447,
"children": {
"run_training.setup": {
"total": 0.1276129340000125,
"count": 1,
"self": 0.1276129340000125
},
"TrainerController.start_learning": {
"total": 2473.6495900209998,
"count": 1,
"self": 4.439944984984777,
"children": {
"TrainerController._reset_env": {
"total": 4.131420225000056,
"count": 1,
"self": 4.131420225000056
},
"TrainerController.advance": {
"total": 2464.875370998014,
"count": 232459,
"self": 4.9962945392226175,
"children": {
"env_step": {
"total": 1928.9861730158173,
"count": 232459,
"self": 1639.041940612784,
"children": {
"SubprocessEnvManager._take_step": {
"total": 286.9657615359911,
"count": 232459,
"self": 16.859026838918567,
"children": {
"TorchPolicy.evaluate": {
"total": 270.1067346970725,
"count": 222994,
"self": 270.1067346970725
}
}
},
"workers": {
"total": 2.9784708670422333,
"count": 232459,
"self": 0.0,
"children": {
"worker_root": {
"total": 2465.3128043369475,
"count": 232459,
"is_parallel": true,
"self": 1119.4877266601416,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009725790000629786,
"count": 1,
"is_parallel": true,
"self": 0.0002552260001493778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007173529999136008,
"count": 2,
"is_parallel": true,
"self": 0.0007173529999136008
}
}
},
"UnityEnvironment.step": {
"total": 0.030284591000054206,
"count": 1,
"is_parallel": true,
"self": 0.00032056600002761115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018130399996607593,
"count": 1,
"is_parallel": true,
"self": 0.00018130399996607593
},
"communicator.exchange": {
"total": 0.029086541000083344,
"count": 1,
"is_parallel": true,
"self": 0.029086541000083344
},
"steps_from_proto": {
"total": 0.0006961799999771756,
"count": 1,
"is_parallel": true,
"self": 0.00020277199996598938,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004934080000111862,
"count": 2,
"is_parallel": true,
"self": 0.0004934080000111862
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1345.825077676806,
"count": 232458,
"is_parallel": true,
"self": 40.32945736496413,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.23528026405643,
"count": 232458,
"is_parallel": true,
"self": 84.23528026405643
},
"communicator.exchange": {
"total": 1127.3351514710214,
"count": 232458,
"is_parallel": true,
"self": 1127.3351514710214
},
"steps_from_proto": {
"total": 93.92518857676407,
"count": 232458,
"is_parallel": true,
"self": 35.6985731036126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.22661547315147,
"count": 464916,
"is_parallel": true,
"self": 58.22661547315147
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 530.8929034429744,
"count": 232459,
"self": 6.80126220107843,
"children": {
"process_trajectory": {
"total": 136.87559613889584,
"count": 232459,
"self": 135.42836318889545,
"children": {
"RLTrainer._checkpoint": {
"total": 1.447232950000398,
"count": 10,
"self": 1.447232950000398
}
}
},
"_update_policy": {
"total": 387.21604510300017,
"count": 97,
"self": 327.09665178999217,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.119393313008004,
"count": 2910,
"self": 60.119393313008004
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.425000391463982e-06,
"count": 1,
"self": 1.425000391463982e-06
},
"TrainerController._save_models": {
"total": 0.20285238800033767,
"count": 1,
"self": 0.003100557000379922,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19975183099995775,
"count": 1,
"self": 0.19975183099995775
}
}
}
}
}
}
}