{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.369171142578125,
"min": 0.369171142578125,
"max": 1.3730838298797607,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11104.66796875,
"min": 11104.66796875,
"max": 41653.87109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4272448718547821,
"min": -0.10390423238277435,
"max": 0.49680212140083313,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 114.50162506103516,
"min": -25.04092025756836,
"max": 133.6397705078125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017328744754195213,
"min": -0.0019422292243689299,
"max": 0.42054614424705505,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.644103527069092,
"min": -0.5049796104431152,
"max": 99.66943359375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06867809589242653,
"min": 0.06542866640988594,
"max": 0.0740367987159987,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9614933424939713,
"min": 0.4814572405630734,
"max": 1.0537817801620502,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011599917466117491,
"min": 0.00031442342249344776,
"max": 0.013915682847889438,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16239884452564488,
"min": 0.004087504492414821,
"max": 0.1966665912574778,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.708168859214287e-06,
"min": 7.708168859214287e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000107914364029,
"min": 0.000107914364029,
"max": 0.0036333040888986996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256935714285713,
"min": 0.10256935714285713,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359709999999999,
"min": 1.3691136000000002,
"max": 2.6111013,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002666787785714286,
"min": 0.0002666787785714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037335029,
"min": 0.0037335029,
"max": 0.12112901987000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013903380371630192,
"min": 0.013903380371630192,
"max": 0.43611976504325867,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19464732706546783,
"min": 0.19464732706546783,
"max": 3.0528383255004883,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 463.7846153846154,
"min": 411.1142857142857,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30146.0,
"min": 15984.0,
"max": 32581.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4441415179234285,
"min": -1.0000000521540642,
"max": 1.5279941008809734,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 93.86919866502285,
"min": -32.000001668930054,
"max": 105.90099769830704,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4441415179234285,
"min": -1.0000000521540642,
"max": 1.5279941008809734,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 93.86919866502285,
"min": -32.000001668930054,
"max": 105.90099769830704,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06636555038231354,
"min": 0.061722813463454845,
"max": 9.063474979251623,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.31376077485038,
"min": 4.19715131551493,
"max": 145.01559966802597,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695551946",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695554476"
},
"total": 2529.7733443079997,
"count": 1,
"self": 0.5451867769997989,
"children": {
"run_training.setup": {
"total": 0.046727481000061744,
"count": 1,
"self": 0.046727481000061744
},
"TrainerController.start_learning": {
"total": 2529.18143005,
"count": 1,
"self": 1.9853077190682598,
"children": {
"TrainerController._reset_env": {
"total": 5.345305349,
"count": 1,
"self": 5.345305349
},
"TrainerController.advance": {
"total": 2521.745399732931,
"count": 63613,
"self": 2.005824477864735,
"children": {
"env_step": {
"total": 1772.9377599160302,
"count": 63613,
"self": 1632.3258851380303,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.43193562597708,
"count": 63613,
"self": 5.7641975439614725,
"children": {
"TorchPolicy.evaluate": {
"total": 133.6677380820156,
"count": 62557,
"self": 133.6677380820156
}
}
},
"workers": {
"total": 1.1799391520228255,
"count": 63613,
"self": 0.0,
"children": {
"worker_root": {
"total": 2522.79427444705,
"count": 63613,
"is_parallel": true,
"self": 1033.7202959700344,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019593070001064916,
"count": 1,
"is_parallel": true,
"self": 0.0006158420003430365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013434649997634551,
"count": 8,
"is_parallel": true,
"self": 0.0013434649997634551
}
}
},
"UnityEnvironment.step": {
"total": 0.10751622199995836,
"count": 1,
"is_parallel": true,
"self": 0.0006408869996903377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004937559999689256,
"count": 1,
"is_parallel": true,
"self": 0.0004937559999689256
},
"communicator.exchange": {
"total": 0.09202031300014824,
"count": 1,
"is_parallel": true,
"self": 0.09202031300014824
},
"steps_from_proto": {
"total": 0.014361266000150863,
"count": 1,
"is_parallel": true,
"self": 0.0004194159998860414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.013941850000264822,
"count": 8,
"is_parallel": true,
"self": 0.013941850000264822
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1489.0739784770158,
"count": 63612,
"is_parallel": true,
"self": 40.290895409004634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.697485635986368,
"count": 63612,
"is_parallel": true,
"self": 26.697485635986368
},
"communicator.exchange": {
"total": 1298.09545534904,
"count": 63612,
"is_parallel": true,
"self": 1298.09545534904
},
"steps_from_proto": {
"total": 123.99014208298468,
"count": 63612,
"is_parallel": true,
"self": 25.03942855693481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.95071352604987,
"count": 508896,
"is_parallel": true,
"self": 98.95071352604987
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 746.801815339036,
"count": 63613,
"self": 3.5011237700587117,
"children": {
"process_trajectory": {
"total": 124.7712087599873,
"count": 63613,
"self": 124.49647201298717,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2747367470001336,
"count": 2,
"self": 0.2747367470001336
}
}
},
"_update_policy": {
"total": 618.52948280899,
"count": 448,
"self": 406.76658157592215,
"children": {
"TorchPPOOptimizer.update": {
"total": 211.76290123306785,
"count": 22785,
"self": 211.76290123306785
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0909998309216462e-06,
"count": 1,
"self": 1.0909998309216462e-06
},
"TrainerController._save_models": {
"total": 0.10541615800048021,
"count": 1,
"self": 0.0014650670000264654,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10395109100045374,
"count": 1,
"self": 0.10395109100045374
}
}
}
}
}
}
}