{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5425539016723633,
"min": 0.5425539016723633,
"max": 1.468619704246521,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16346.064453125,
"min": 16346.064453125,
"max": 44552.046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41159680485725403,
"min": -0.11817629635334015,
"max": 0.41159680485725403,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 112.36592864990234,
"min": -28.480487823486328,
"max": 112.36592864990234,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0859936773777008,
"min": -1.211134433746338,
"max": 2.1595988273620605,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -23.476274490356445,
"min": -311.26153564453125,
"max": 557.176513671875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06964862173837093,
"min": 0.06676053485023745,
"max": 0.07377512072322563,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.044729326075564,
"min": 0.5080945445789993,
"max": 1.0528265653045703,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.02242212637308209,
"min": 0.00043283175787830314,
"max": 0.6132332091218582,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.33633189559623133,
"min": 0.0038954858209047285,
"max": 9.198498136827872,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4768175077600005e-06,
"min": 7.4768175077600005e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011215226261640001,
"min": 0.00011215226261640001,
"max": 0.0030070069976644,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249224,
"min": 0.10249224,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373836,
"min": 1.3886848,
"max": 2.3926580000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002589747760000001,
"min": 0.0002589747760000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003884621640000001,
"min": 0.003884621640000001,
"max": 0.10025332644,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00862919818609953,
"min": 0.008608328178524971,
"max": 0.34311988949775696,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12943796813488007,
"min": 0.12051659822463989,
"max": 2.401839256286621,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 392.02777777777777,
"min": 392.02777777777777,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28226.0,
"min": 15984.0,
"max": 33178.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.580419156036965,
"min": -1.0000000521540642,
"max": 1.580419156036965,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 115.37059839069843,
"min": -30.999801620841026,
"max": 115.37059839069843,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.580419156036965,
"min": -1.0000000521540642,
"max": 1.580419156036965,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 115.37059839069843,
"min": -30.999801620841026,
"max": 115.37059839069843,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03538861969200422,
"min": 0.03538861969200422,
"max": 7.488560322672129,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.583369237516308,
"min": 2.5102338370488724,
"max": 119.81696516275406,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688477241",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688479316"
},
"total": 2075.3085230530005,
"count": 1,
"self": 0.4743079080003554,
"children": {
"run_training.setup": {
"total": 0.03737399900001037,
"count": 1,
"self": 0.03737399900001037
},
"TrainerController.start_learning": {
"total": 2074.796841146,
"count": 1,
"self": 1.2344925670058728,
"children": {
"TrainerController._reset_env": {
"total": 4.260089194000102,
"count": 1,
"self": 4.260089194000102
},
"TrainerController.advance": {
"total": 2069.2089239729935,
"count": 63542,
"self": 1.3632959320025293,
"children": {
"env_step": {
"total": 1445.5772854350007,
"count": 63542,
"self": 1341.8772181949712,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.97800759501251,
"count": 63542,
"self": 4.5506318940765595,
"children": {
"TorchPolicy.evaluate": {
"total": 98.42737570093595,
"count": 62559,
"self": 98.42737570093595
}
}
},
"workers": {
"total": 0.7220596450169978,
"count": 63542,
"self": 0.0,
"children": {
"worker_root": {
"total": 2070.200419161969,
"count": 63542,
"is_parallel": true,
"self": 835.2634478619243,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001827699999921606,
"count": 1,
"is_parallel": true,
"self": 0.0005811420005557011,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012465579993659048,
"count": 8,
"is_parallel": true,
"self": 0.0012465579993659048
}
}
},
"UnityEnvironment.step": {
"total": 0.0884340779998638,
"count": 1,
"is_parallel": true,
"self": 0.0005736929997510742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000507522000134486,
"count": 1,
"is_parallel": true,
"self": 0.000507522000134486
},
"communicator.exchange": {
"total": 0.08551485799989678,
"count": 1,
"is_parallel": true,
"self": 0.08551485799989678
},
"steps_from_proto": {
"total": 0.0018380050000814663,
"count": 1,
"is_parallel": true,
"self": 0.0003565310003068589,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014814739997746074,
"count": 8,
"is_parallel": true,
"self": 0.0014814739997746074
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1234.9369713000447,
"count": 63541,
"is_parallel": true,
"self": 32.25312550399394,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.245040674991287,
"count": 63541,
"is_parallel": true,
"self": 22.245040674991287
},
"communicator.exchange": {
"total": 1082.053668697943,
"count": 63541,
"is_parallel": true,
"self": 1082.053668697943
},
"steps_from_proto": {
"total": 98.38513642311636,
"count": 63541,
"is_parallel": true,
"self": 18.98155933913904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.40357708397732,
"count": 508328,
"is_parallel": true,
"self": 79.40357708397732
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 622.2683426059903,
"count": 63542,
"self": 2.281052304993409,
"children": {
"process_trajectory": {
"total": 104.51294166901334,
"count": 63542,
"self": 104.30979057901368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2031510899996647,
"count": 2,
"self": 0.2031510899996647
}
}
},
"_update_policy": {
"total": 515.4743486319835,
"count": 437,
"self": 330.22803220603737,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.24631642594613,
"count": 22809,
"self": 185.24631642594613
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0410003596916795e-06,
"count": 1,
"self": 1.0410003596916795e-06
},
"TrainerController._save_models": {
"total": 0.09333437100031006,
"count": 1,
"self": 0.0013144550002834876,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09201991600002657,
"count": 1,
"self": 0.09201991600002657
}
}
}
}
}
}
}