{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5390892624855042,
"min": 0.5390892624855042,
"max": 1.5593442916870117,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16241.681640625,
"min": 16241.681640625,
"max": 47304.26953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989961.0,
"min": 29952.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989961.0,
"min": 29952.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3679182529449463,
"min": -0.2248402237892151,
"max": 0.40893039107322693,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 97.49833679199219,
"min": -53.287132263183594,
"max": 107.54869079589844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.3092624843120575,
"min": -0.3092624843120575,
"max": 0.15915042161941528,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -81.95455932617188,
"min": -81.95455932617188,
"max": 37.718650817871094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0663803664124711,
"min": 0.06521619034166214,
"max": 0.07437726868137172,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9293251297745954,
"min": 0.47215893594651814,
"max": 1.0587588445838156,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.02464498167960084,
"min": 7.168061418446273e-05,
"max": 0.02464498167960084,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3450297435144118,
"min": 0.0008601673702135527,
"max": 0.3450297435144118,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.675511727242859e-06,
"min": 7.675511727242859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010745716418140003,
"min": 0.00010745716418140003,
"max": 0.0031381799539400997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255847142857144,
"min": 0.10255847142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358186000000002,
"min": 1.3691136000000002,
"max": 2.3460599,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026559129571428577,
"min": 0.00026559129571428577,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037182781400000005,
"min": 0.0037182781400000005,
"max": 0.10463138401000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005210391245782375,
"min": 0.005210391245782375,
"max": 0.21293902397155762,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0729454755783081,
"min": 0.0729454755783081,
"max": 1.4905731678009033,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 484.57142857142856,
"min": 475.41269841269843,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30528.0,
"min": 15984.0,
"max": 33288.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2843491780379461,
"min": -1.0000000521540642,
"max": 1.4055841003382017,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 80.91399821639061,
"min": -32.000001668930054,
"max": 88.5517983213067,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2843491780379461,
"min": -1.0000000521540642,
"max": 1.4055841003382017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 80.91399821639061,
"min": -32.000001668930054,
"max": 88.5517983213067,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026754732689744466,
"min": 0.02612496932528706,
"max": 4.479016438592225,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6855481594539015,
"min": 1.6458730674930848,
"max": 71.6642630174756,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703602748",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703604907"
},
"total": 2158.59164945,
"count": 1,
"self": 0.4766006190002372,
"children": {
"run_training.setup": {
"total": 0.06884812099997362,
"count": 1,
"self": 0.06884812099997362
},
"TrainerController.start_learning": {
"total": 2158.04620071,
"count": 1,
"self": 1.5453916851010945,
"children": {
"TrainerController._reset_env": {
"total": 2.1010409140003503,
"count": 1,
"self": 2.1010409140003503
},
"TrainerController.advance": {
"total": 2154.3108202428984,
"count": 63461,
"self": 1.5989859000410434,
"children": {
"env_step": {
"total": 1525.7365561059319,
"count": 63461,
"self": 1385.7640349640114,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.0374795229368,
"count": 63461,
"self": 5.088378932812702,
"children": {
"TorchPolicy.evaluate": {
"total": 133.9491005901241,
"count": 62572,
"self": 133.9491005901241
}
}
},
"workers": {
"total": 0.9350416189836324,
"count": 63461,
"self": 0.0,
"children": {
"worker_root": {
"total": 2152.544307878044,
"count": 63461,
"is_parallel": true,
"self": 895.5127977370353,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018347079999330163,
"count": 1,
"is_parallel": true,
"self": 0.0005834559997310862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00125125200020193,
"count": 8,
"is_parallel": true,
"self": 0.00125125200020193
}
}
},
"UnityEnvironment.step": {
"total": 0.051021830000081536,
"count": 1,
"is_parallel": true,
"self": 0.0006035650003468618,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005106270000396762,
"count": 1,
"is_parallel": true,
"self": 0.0005106270000396762
},
"communicator.exchange": {
"total": 0.04820219099974565,
"count": 1,
"is_parallel": true,
"self": 0.04820219099974565
},
"steps_from_proto": {
"total": 0.001705446999949345,
"count": 1,
"is_parallel": true,
"self": 0.0003689579998535919,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001336489000095753,
"count": 8,
"is_parallel": true,
"self": 0.001336489000095753
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1257.0315101410088,
"count": 63460,
"is_parallel": true,
"self": 35.89969953806394,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.958572241024285,
"count": 63460,
"is_parallel": true,
"self": 25.958572241024285
},
"communicator.exchange": {
"total": 1089.9492681559586,
"count": 63460,
"is_parallel": true,
"self": 1089.9492681559586
},
"steps_from_proto": {
"total": 105.2239702059619,
"count": 63460,
"is_parallel": true,
"self": 21.632838686037758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.59113151992415,
"count": 507680,
"is_parallel": true,
"self": 83.59113151992415
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 626.9752782369255,
"count": 63461,
"self": 2.757885666922448,
"children": {
"process_trajectory": {
"total": 127.07920654500049,
"count": 63461,
"self": 126.89430934300026,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1848972020002293,
"count": 2,
"self": 0.1848972020002293
}
}
},
"_update_policy": {
"total": 497.1381860250026,
"count": 434,
"self": 293.10731915304996,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.03086687195264,
"count": 22848,
"self": 204.03086687195264
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4090001059230417e-06,
"count": 1,
"self": 1.4090001059230417e-06
},
"TrainerController._save_models": {
"total": 0.08894645899999887,
"count": 1,
"self": 0.001974519000214059,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08697193999978481,
"count": 1,
"self": 0.08697193999978481
}
}
}
}
}
}
}