{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3974918127059937,
"min": 1.3974918127059937,
"max": 1.427964687347412,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68637.8125,
"min": 68145.6328125,
"max": 78677.7109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.4574074074074,
"min": 86.80140597539543,
"max": 418.55,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49387.0,
"min": 48840.0,
"max": 50226.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999918.0,
"min": 49722.0,
"max": 1999918.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999918.0,
"min": 49722.0,
"max": 1999918.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3616604804992676,
"min": 0.17667940258979797,
"max": 2.4137935638427734,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1275.296630859375,
"min": 21.02484893798828,
"max": 1356.615478515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7031523921975382,
"min": 1.6934683362976843,
"max": 3.8404612168529386,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1999.7022917866707,
"min": 201.52273201942444,
"max": 2165.278116941452,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7031523921975382,
"min": 1.6934683362976843,
"max": 3.8404612168529386,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1999.7022917866707,
"min": 201.52273201942444,
"max": 2165.278116941452,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015998956241310227,
"min": 0.013725232127277802,
"max": 0.020862704302029064,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04799686872393068,
"min": 0.027450464254555603,
"max": 0.06258811290608719,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05334815556804339,
"min": 0.020789316948503257,
"max": 0.06708911669751008,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16004446670413017,
"min": 0.041578633897006514,
"max": 0.18906249602635702,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4539988486999986e-06,
"min": 3.4539988486999986e-06,
"max": 0.00029526502657832506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0361996546099996e-05,
"min": 1.0361996546099996e-05,
"max": 0.0008435113688295498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115129999999999,
"min": 0.10115129999999999,
"max": 0.19842167500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345389999999994,
"min": 0.20747689999999994,
"max": 0.5811704500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.744986999999998e-05,
"min": 6.744986999999998e-05,
"max": 0.0049212415824999994,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020234960999999994,
"min": 0.00020234960999999994,
"max": 0.014060405454999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670785649",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670787986"
},
"total": 2336.456763914,
"count": 1,
"self": 0.44574988299973484,
"children": {
"run_training.setup": {
"total": 0.11129646599999887,
"count": 1,
"self": 0.11129646599999887
},
"TrainerController.start_learning": {
"total": 2335.899717565,
"count": 1,
"self": 4.0358055380015685,
"children": {
"TrainerController._reset_env": {
"total": 10.512352733000057,
"count": 1,
"self": 10.512352733000057
},
"TrainerController.advance": {
"total": 2321.234406281999,
"count": 231484,
"self": 4.466584802084526,
"children": {
"env_step": {
"total": 1842.6678066239438,
"count": 231484,
"self": 1544.654403841924,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.2788948940108,
"count": 231484,
"self": 15.314673267069452,
"children": {
"TorchPolicy.evaluate": {
"total": 279.96422162694137,
"count": 222820,
"self": 69.781757980051,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.18246364689037,
"count": 222820,
"self": 210.18246364689037
}
}
}
}
},
"workers": {
"total": 2.7345078880089204,
"count": 231484,
"self": 0.0,
"children": {
"worker_root": {
"total": 2327.613124777957,
"count": 231484,
"is_parallel": true,
"self": 1064.1165518279245,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020290310000063982,
"count": 1,
"is_parallel": true,
"self": 0.0003271890001315114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017018419998748868,
"count": 2,
"is_parallel": true,
"self": 0.0017018419998748868
}
}
},
"UnityEnvironment.step": {
"total": 0.028899032000026637,
"count": 1,
"is_parallel": true,
"self": 0.0002913990000479316,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018046599996068835,
"count": 1,
"is_parallel": true,
"self": 0.00018046599996068835
},
"communicator.exchange": {
"total": 0.027672522999978355,
"count": 1,
"is_parallel": true,
"self": 0.027672522999978355
},
"steps_from_proto": {
"total": 0.0007546440000396615,
"count": 1,
"is_parallel": true,
"self": 0.00029050000000552245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000464144000034139,
"count": 2,
"is_parallel": true,
"self": 0.000464144000034139
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1263.4965729500327,
"count": 231483,
"is_parallel": true,
"self": 36.01794878290093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.89095257897566,
"count": 231483,
"is_parallel": true,
"self": 83.89095257897566
},
"communicator.exchange": {
"total": 1044.9600496670778,
"count": 231483,
"is_parallel": true,
"self": 1044.9600496670778
},
"steps_from_proto": {
"total": 98.62762192107834,
"count": 231483,
"is_parallel": true,
"self": 42.74694928117685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.88067263990149,
"count": 462966,
"is_parallel": true,
"self": 55.88067263990149
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 474.1000148559708,
"count": 231484,
"self": 6.451129291035045,
"children": {
"process_trajectory": {
"total": 151.97825568593782,
"count": 231484,
"self": 151.48968037693783,
"children": {
"RLTrainer._checkpoint": {
"total": 0.488575308999998,
"count": 4,
"self": 0.488575308999998
}
}
},
"_update_policy": {
"total": 315.67062987899794,
"count": 97,
"self": 261.89525298399724,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.775376895000704,
"count": 2910,
"self": 53.775376895000704
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0909998309216462e-06,
"count": 1,
"self": 1.0909998309216462e-06
},
"TrainerController._save_models": {
"total": 0.11715192099973137,
"count": 1,
"self": 0.002134751999619766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11501716900011161,
"count": 1,
"self": 0.11501716900011161
}
}
}
}
}
}
}