{
"name": "root",
"gauges": {
"PushBlock.Policy.Entropy.mean": {
"value": 0.5013278126716614,
"min": 0.4978146553039551,
"max": 1.8884873390197754,
"count": 16
},
"PushBlock.Policy.Entropy.sum": {
"value": 30111.751953125,
"min": 29868.87890625,
"max": 114578.3046875,
"count": 16
},
"PushBlock.Environment.EpisodeLength.mean": {
"value": 29.096056622851364,
"min": 26.901285583103764,
"max": 800.2045454545455,
"count": 16
},
"PushBlock.Environment.EpisodeLength.sum": {
"value": 57552.0,
"min": 35209.0,
"max": 70410.0,
"count": 16
},
"PushBlock.Step.mean": {
"value": 959979.0,
"min": 59957.0,
"max": 959979.0,
"count": 16
},
"PushBlock.Step.sum": {
"value": 959979.0,
"min": 59957.0,
"max": 959979.0,
"count": 16
},
"PushBlock.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.929089069366455,
"min": 0.05562363937497139,
"max": 4.115839004516602,
"count": 16
},
"PushBlock.Policy.ExtrinsicValueEstimate.sum": {
"value": 9029.046875,
"min": 53.120574951171875,
"max": 9943.8671875,
"count": 16
},
"PushBlock.Environment.CumulativeReward.mean": {
"value": 4.924831472431366,
"min": 0.7898636480624025,
"max": 4.94725110939951,
"count": 16
},
"PushBlock.Environment.CumulativeReward.sum": {
"value": 9746.241483941674,
"min": 34.75400051474571,
"max": 10775.112916272134,
"count": 16
},
"PushBlock.Policy.ExtrinsicReward.mean": {
"value": 4.924831472431366,
"min": 0.7898636480624025,
"max": 4.94725110939951,
"count": 16
},
"PushBlock.Policy.ExtrinsicReward.sum": {
"value": 9746.241483941674,
"min": 34.75400051474571,
"max": 10775.112916272134,
"count": 16
},
"PushBlock.Losses.PolicyLoss.mean": {
"value": 0.06821112005821504,
"min": 0.06610743193746672,
"max": 0.07124047085971115,
"count": 16
},
"PushBlock.Losses.PolicyLoss.sum": {
"value": 1.9781224816882361,
"min": 1.7810117714927787,
"max": 2.0536290933281034,
"count": 16
},
"PushBlock.Losses.ValueLoss.mean": {
"value": 0.2008959916848475,
"min": 0.04920394663841351,
"max": 0.3776434305198919,
"count": 16
},
"PushBlock.Losses.ValueLoss.sum": {
"value": 5.825983758860578,
"min": 1.2300986659603377,
"max": 10.951659485076865,
"count": 16
},
"PushBlock.Policy.LearningRate.mean": {
"value": 2.1059534359496555e-05,
"min": 2.1059534359496555e-05,
"max": 0.00029034048321984,
"count": 16
},
"PushBlock.Policy.LearningRate.sum": {
"value": 0.0006107264964254,
"min": 0.0006107264964254,
"max": 0.0076451291516236985,
"count": 16
},
"PushBlock.Policy.Epsilon.mean": {
"value": 0.10701981379310346,
"min": 0.10701981379310346,
"max": 0.19678016000000004,
"count": 16
},
"PushBlock.Policy.Epsilon.sum": {
"value": 3.1035746000000004,
"min": 3.1035746000000004,
"max": 5.366889400000001,
"count": 16
},
"PushBlock.Policy.Beta.mean": {
"value": 0.0007112793979310347,
"min": 0.0007112793979310347,
"max": 0.009678337984000002,
"count": 16
},
"PushBlock.Policy.Beta.sum": {
"value": 0.020627102540000004,
"min": 0.020627102540000004,
"max": 0.25486279237000004,
"count": 16
},
"PushBlock.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"PushBlock.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673609870",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PushBlock.yaml --env=./training-envs-executables/linux/PushBlock/PushBlock --run-id=PushBlock Training --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673610885"
},
"total": 1015.428120179,
"count": 1,
"self": 0.3739461669999855,
"children": {
"run_training.setup": {
"total": 0.10166472699995666,
"count": 1,
"self": 0.10166472699995666
},
"TrainerController.start_learning": {
"total": 1014.952509285,
"count": 1,
"self": 0.965871988962931,
"children": {
"TrainerController._reset_env": {
"total": 6.276484015000051,
"count": 1,
"self": 6.276484015000051
},
"TrainerController.advance": {
"total": 1007.6487054310369,
"count": 47718,
"self": 0.9406912270151224,
"children": {
"env_step": {
"total": 616.8289498020071,
"count": 47718,
"self": 565.7502571090608,
"children": {
"SubprocessEnvManager._take_step": {
"total": 50.54634361095816,
"count": 47718,
"self": 2.435593314966013,
"children": {
"TorchPolicy.evaluate": {
"total": 48.11075029599215,
"count": 31270,
"self": 15.39985357400235,
"children": {
"TorchPolicy.sample_actions": {
"total": 32.7108967219898,
"count": 31270,
"self": 32.7108967219898
}
}
}
}
},
"workers": {
"total": 0.532349081988059,
"count": 47718,
"self": 0.0,
"children": {
"worker_root": {
"total": 1012.4502011970051,
"count": 47718,
"is_parallel": true,
"self": 519.1761926560424,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022522739999430996,
"count": 1,
"is_parallel": true,
"self": 0.000777586999788582,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014746870001545176,
"count": 4,
"is_parallel": true,
"self": 0.0014746870001545176
}
}
},
"UnityEnvironment.step": {
"total": 0.03303778199995122,
"count": 1,
"is_parallel": true,
"self": 0.0007162829998605957,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000720770000043558,
"count": 1,
"is_parallel": true,
"self": 0.000720770000043558
},
"communicator.exchange": {
"total": 0.029238625000061802,
"count": 1,
"is_parallel": true,
"self": 0.029238625000061802
},
"steps_from_proto": {
"total": 0.0023621039999852655,
"count": 1,
"is_parallel": true,
"self": 0.0005520999999362175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001810004000049048,
"count": 4,
"is_parallel": true,
"self": 0.001810004000049048
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 493.27400854096265,
"count": 47717,
"is_parallel": true,
"self": 23.452266761988426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.47216375398841,
"count": 47717,
"is_parallel": true,
"self": 20.47216375398841
},
"communicator.exchange": {
"total": 374.7922909120025,
"count": 47717,
"is_parallel": true,
"self": 374.7922909120025
},
"steps_from_proto": {
"total": 74.55728711298332,
"count": 47717,
"is_parallel": true,
"self": 14.021818385979827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.53546872700349,
"count": 190868,
"is_parallel": true,
"self": 60.53546872700349
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 389.8790644020147,
"count": 47718,
"self": 1.7906268160321588,
"children": {
"process_trajectory": {
"total": 108.9426736099831,
"count": 47718,
"self": 108.80744560898302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1352280010000868,
"count": 2,
"self": 0.1352280010000868
}
}
},
"_update_policy": {
"total": 279.14576397599944,
"count": 477,
"self": 143.75034034898988,
"children": {
"TorchPPOOptimizer.update": {
"total": 135.39542362700956,
"count": 23076,
"self": 135.39542362700956
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.640001058462076e-07,
"count": 1,
"self": 7.640001058462076e-07
},
"TrainerController._save_models": {
"total": 0.06144708600004378,
"count": 1,
"self": 0.0009176949999982753,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06052939100004551,
"count": 1,
"self": 0.06052939100004551
}
}
}
}
}
}
}