{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2740525007247925,
"min": 0.2740525007247925,
"max": 1.5402588844299316,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8182.111328125,
"min": 8182.111328125,
"max": 46725.29296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989916.0,
"min": 29952.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989916.0,
"min": 29952.0,
"max": 989916.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7320511937141418,
"min": -0.0860162153840065,
"max": 0.7320511937141418,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 212.2948455810547,
"min": -20.729907989501953,
"max": 212.2948455810547,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019426757469773293,
"min": -0.010349159128963947,
"max": 0.23527663946151733,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.633759498596191,
"min": -2.959859609603882,
"max": 56.466392517089844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06607815693792012,
"min": 0.06298661256183145,
"max": 0.0727185758850759,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9250941971308817,
"min": 0.4886501151556551,
"max": 1.0748833447555537,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01460864452303763,
"min": 0.0009774613464094888,
"max": 0.0167398594837249,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2045210233225268,
"min": 0.006842229424866422,
"max": 0.2343580327721486,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.661776017535713e-06,
"min": 7.661776017535713e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010726486424549998,
"min": 0.00010726486424549998,
"max": 0.0036329617890127996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255389285714285,
"min": 0.10255389285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357544999999998,
"min": 1.3691136000000002,
"max": 2.6109872,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002651338964285715,
"min": 0.0002651338964285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003711874550000001,
"min": 0.003711874550000001,
"max": 0.12111762128000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011309400200843811,
"min": 0.011309400200843811,
"max": 0.39527592062950134,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15833160281181335,
"min": 0.15833160281181335,
"max": 2.7669315338134766,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 271.7522935779817,
"min": 251.86776859504133,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29621.0,
"min": 15984.0,
"max": 32371.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7106018010865558,
"min": -1.0000000521540642,
"max": 1.7477999866008758,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 188.16619811952114,
"min": -32.000001668930054,
"max": 209.7359983921051,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7106018010865558,
"min": -1.0000000521540642,
"max": 1.7477999866008758,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 188.16619811952114,
"min": -32.000001668930054,
"max": 209.7359983921051,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03171243051500906,
"min": 0.030701358927763066,
"max": 8.038420243188739,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.488367356650997,
"min": 3.488367356650997,
"max": 128.61472389101982,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704536600",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704539086"
},
"total": 2485.462168256,
"count": 1,
"self": 0.49665773600008833,
"children": {
"run_training.setup": {
"total": 0.05154931200002011,
"count": 1,
"self": 0.05154931200002011
},
"TrainerController.start_learning": {
"total": 2484.913961208,
"count": 1,
"self": 1.6698278129319988,
"children": {
"TrainerController._reset_env": {
"total": 3.0924113579999357,
"count": 1,
"self": 3.0924113579999357
},
"TrainerController.advance": {
"total": 2480.0631900760677,
"count": 64314,
"self": 1.7710278030735935,
"children": {
"env_step": {
"total": 1805.4001162789887,
"count": 64314,
"self": 1654.6496248250808,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.74779008897406,
"count": 64314,
"self": 5.4807477519433405,
"children": {
"TorchPolicy.evaluate": {
"total": 144.26704233703072,
"count": 62542,
"self": 144.26704233703072
}
}
},
"workers": {
"total": 1.0027013649338414,
"count": 64314,
"self": 0.0,
"children": {
"worker_root": {
"total": 2479.1795190170405,
"count": 64314,
"is_parallel": true,
"self": 961.6578393350635,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0037378690000196,
"count": 1,
"is_parallel": true,
"self": 0.0025774870001669115,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011603819998526888,
"count": 8,
"is_parallel": true,
"self": 0.0011603819998526888
}
}
},
"UnityEnvironment.step": {
"total": 0.053241848999959984,
"count": 1,
"is_parallel": true,
"self": 0.0006492319998869789,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004993559999775243,
"count": 1,
"is_parallel": true,
"self": 0.0004993559999775243
},
"communicator.exchange": {
"total": 0.050156635000007554,
"count": 1,
"is_parallel": true,
"self": 0.050156635000007554
},
"steps_from_proto": {
"total": 0.0019366260000879265,
"count": 1,
"is_parallel": true,
"self": 0.0005196980001755946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014169279999123319,
"count": 8,
"is_parallel": true,
"self": 0.0014169279999123319
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1517.521679681977,
"count": 64313,
"is_parallel": true,
"self": 38.94968400103994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.54794296497721,
"count": 64313,
"is_parallel": true,
"self": 27.54794296497721
},
"communicator.exchange": {
"total": 1337.6674896180016,
"count": 64313,
"is_parallel": true,
"self": 1337.6674896180016
},
"steps_from_proto": {
"total": 113.35656309795831,
"count": 64313,
"is_parallel": true,
"self": 23.649676890854835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.70688620710348,
"count": 514504,
"is_parallel": true,
"self": 89.70688620710348
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 672.8920459940055,
"count": 64314,
"self": 3.1672955359798607,
"children": {
"process_trajectory": {
"total": 140.4743919440274,
"count": 64314,
"self": 140.27425966402745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20013227999993433,
"count": 2,
"self": 0.20013227999993433
}
}
},
"_update_policy": {
"total": 529.2503585139982,
"count": 451,
"self": 314.49438980000525,
"children": {
"TorchPPOOptimizer.update": {
"total": 214.75596871399296,
"count": 22791,
"self": 214.75596871399296
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.360001058666967e-07,
"count": 1,
"self": 9.360001058666967e-07
},
"TrainerController._save_models": {
"total": 0.08853102500006571,
"count": 1,
"self": 0.00222289399971487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08630813100035084,
"count": 1,
"self": 0.08630813100035084
}
}
}
}
}
}
}
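
A minimal Python sketch for reading the metrics recorded above. It assumes the JSON is saved as run_logs/timers.json (the location mlagents-learn normally uses under results/<run-id>/); the file path and the interpretation of "value" as the most recently recorded gauge reading are assumptions, not something stated in the log itself.

import json

# Load the timer/gauge dump produced by mlagents-learn (path is an assumption).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds per-metric summaries: value plus min/max/count over the run.
reward = timers["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
steps = timers["gauges"]["Pyramids.Step.mean"]
print(f"steps: {steps['value']:.0f}")
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

# The timer tree ("total"/"self"/"children") breaks down wall-clock time,
# e.g. the overall run duration in seconds.
print(f"total run time: {timers['total']:.1f} s")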