ppo-Huggy / run_logs/timers.json
ignamonte's picture
Huggy
2368b62
raw · history · blame · contribute · delete
No virus · 17.9 kB
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4049372673034668,
"min": 1.4049372673034668,
"max": 1.4286155700683594,
"count": 39
},
"Huggy.Policy.Entropy.sum": {
"value": 70173.8046875,
"min": 67885.1875,
"max": 76982.0546875,
"count": 39
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.31,
"min": 75.29555895865238,
"max": 420.0083333333333,
"count": 39
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49386.0,
"min": 49085.0,
"max": 50401.0,
"count": 39
},
"Huggy.Step.mean": {
"value": 1949906.0,
"min": 49999.0,
"max": 1949906.0,
"count": 39
},
"Huggy.Step.sum": {
"value": 1949906.0,
"min": 49999.0,
"max": 1949906.0,
"count": 39
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.446791887283325,
"min": 0.09747413545846939,
"max": 2.4741194248199463,
"count": 39
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1468.0750732421875,
"min": 11.599422454833984,
"max": 1601.995849609375,
"count": 39
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7988817846775054,
"min": 1.837012348806157,
"max": 3.976086442073186,
"count": 39
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2279.3290708065033,
"min": 218.60446950793266,
"max": 2522.521385014057,
"count": 39
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7988817846775054,
"min": 1.837012348806157,
"max": 3.976086442073186,
"count": 39
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2279.3290708065033,
"min": 218.60446950793266,
"max": 2522.521385014057,
"count": 39
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018656118672030667,
"min": 0.012707770229462767,
"max": 0.020685278445792696,
"count": 39
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03731223734406133,
"min": 0.025415540458925533,
"max": 0.05219667065975954,
"count": 39
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05366541085143884,
"min": 0.021912601217627524,
"max": 0.059947727185984456,
"count": 39
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10733082170287768,
"min": 0.04382520243525505,
"max": 0.1781808293114106,
"count": 39
},
"Huggy.Policy.LearningRate.mean": {
"value": 1.1567946144050005e-05,
"min": 1.1567946144050005e-05,
"max": 0.00029527395157534996,
"count": 39
},
"Huggy.Policy.LearningRate.sum": {
"value": 2.313589228810001e-05,
"min": 2.313589228810001e-05,
"max": 0.0008436718687760501,
"count": 39
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10385595,
"min": 0.10385595,
"max": 0.19842465000000004,
"count": 39
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2077119,
"min": 0.2077119,
"max": 0.5812239499999998,
"count": 39
},
"Huggy.Policy.Beta.mean": {
"value": 0.00020241190500000018,
"min": 0.00020241190500000018,
"max": 0.004921390035000001,
"count": 39
},
"Huggy.Policy.Beta.sum": {
"value": 0.00040482381000000035,
"min": 0.00040482381000000035,
"max": 0.014063075104999996,
"count": 39
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 39
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 39
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670764317",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670766462"
},
"total": 2145.0520965799997,
"count": 1,
"self": 10.008713797999462,
"children": {
"run_training.setup": {
"total": 0.105532390999997,
"count": 1,
"self": 0.105532390999997
},
"TrainerController.start_learning": {
"total": 2134.937850391,
"count": 1,
"self": 3.7903234859554686,
"children": {
"TrainerController._reset_env": {
"total": 10.83409558400001,
"count": 1,
"self": 10.83409558400001
},
"TrainerController.advance": {
"total": 2120.130292083045,
"count": 227242,
"self": 4.046155947153238,
"children": {
"env_step": {
"total": 1668.3730619499931,
"count": 227242,
"self": 1400.8124546220502,
"children": {
"SubprocessEnvManager._take_step": {
"total": 265.05734577899784,
"count": 227242,
"self": 13.910732319060457,
"children": {
"TorchPolicy.evaluate": {
"total": 251.14661345993738,
"count": 217558,
"self": 63.41278021890196,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.73383324103543,
"count": 217558,
"self": 187.73383324103543
}
}
}
}
},
"workers": {
"total": 2.5032615489450336,
"count": 227241,
"self": 0.0,
"children": {
"worker_root": {
"total": 2127.3495334590534,
"count": 227241,
"is_parallel": true,
"self": 977.2002030890801,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022285759999931543,
"count": 1,
"is_parallel": true,
"self": 0.00029063800002404605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019379379999691082,
"count": 2,
"is_parallel": true,
"self": 0.0019379379999691082
}
}
},
"UnityEnvironment.step": {
"total": 0.02690880299996934,
"count": 1,
"is_parallel": true,
"self": 0.00029030199993940187,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017719400000260066,
"count": 1,
"is_parallel": true,
"self": 0.00017719400000260066
},
"communicator.exchange": {
"total": 0.025706061000050795,
"count": 1,
"is_parallel": true,
"self": 0.025706061000050795
},
"steps_from_proto": {
"total": 0.0007352459999765415,
"count": 1,
"is_parallel": true,
"self": 0.0002660990001004393,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004691469998761022,
"count": 2,
"is_parallel": true,
"self": 0.0004691469998761022
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1150.1493303699733,
"count": 227240,
"is_parallel": true,
"self": 33.5120115350594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.0062600799472,
"count": 227240,
"is_parallel": true,
"self": 74.0062600799472
},
"communicator.exchange": {
"total": 952.8639262280135,
"count": 227240,
"is_parallel": true,
"self": 952.8639262280135
},
"steps_from_proto": {
"total": 89.76713252695322,
"count": 227240,
"is_parallel": true,
"self": 36.901885798056014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.865246728897205,
"count": 454480,
"is_parallel": true,
"self": 52.865246728897205
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 447.71107418589827,
"count": 227241,
"self": 5.857419967037117,
"children": {
"process_trajectory": {
"total": 142.15309032286166,
"count": 227241,
"self": 141.80509532086137,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3479950020002889,
"count": 3,
"self": 0.3479950020002889
}
}
},
"_update_policy": {
"total": 299.7005638959995,
"count": 94,
"self": 248.01190740500692,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.68865649099257,
"count": 2820,
"self": 51.68865649099257
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.475999852118548e-06,
"count": 1,
"self": 1.475999852118548e-06
},
"TrainerController._save_models": {
"total": 0.1831377620001149,
"count": 1,
"self": 0.0028474350001488347,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18029032699996606,
"count": 1,
"self": 0.18029032699996606
}
}
}
}
}
}
}