{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5920994281768799,
"min": 0.5920994281768799,
"max": 1.396640419960022,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 17772.45703125,
"min": 17772.45703125,
"max": 42368.484375,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89895.0,
"min": 29952.0,
"max": 89895.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89895.0,
"min": 29952.0,
"max": 89895.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08060117810964584,
"min": -0.08060117810964584,
"max": 0.1492552012205124,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -19.50548553466797,
"min": -19.50548553466797,
"max": 35.37348175048828,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.910521388053894,
"min": 0.910521388053894,
"max": 1.4801007509231567,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 220.34617614746094,
"min": 220.34617614746094,
"max": 356.70428466796875,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04922671732786335,
"min": 0.04922671732786335,
"max": 0.05336748282426961,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5907206079343602,
"min": 0.3735723797698873,
"max": 0.5907206079343602,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.002812625459016786,
"min": 0.002812625459016786,
"max": 0.058140459392761475,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.03375150550820143,
"min": 0.03375150550820143,
"max": 0.40698321574933033,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.774157408616665e-05,
"min": 7.774157408616665e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009328988890339999,
"min": 0.0009328988890339999,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12591383333333336,
"min": 0.12591383333333336,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5109660000000003,
"min": 1.2868480000000002,
"max": 1.5565020000000003,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025987919500000006,
"min": 0.0025987919500000006,
"max": 0.008385159314285715,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.031185503400000005,
"min": 0.031185503400000005,
"max": 0.0586961152,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.12982051074504852,
"min": 0.12982051074504852,
"max": 0.4058314263820648,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.557846188545227,
"min": 1.557846188545227,
"max": 2.840820074081421,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 962.030303030303,
"min": 962.030303030303,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31747.0,
"min": 15984.0,
"max": 31843.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7204667153683576,
"min": -1.0000000521540642,
"max": -0.7204667153683576,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -23.7754016071558,
"min": -27.873001739382744,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7204667153683576,
"min": -1.0000000521540642,
"max": -0.7204667153683576,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -23.7754016071558,
"min": -27.873001739382744,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 7.310272389289104,
"min": 7.310272389289104,
"max": 37.97674008458853,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 241.23898884654045,
"min": 241.23898884654045,
"max": 607.6278413534164,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692017717",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/victor/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692017912"
},
"total": 195.31421026000044,
"count": 1,
"self": 0.3698744060006902,
"children": {
"run_training.setup": {
"total": 0.020221199999923556,
"count": 1,
"self": 0.020221199999923556
},
"TrainerController.start_learning": {
"total": 194.92411465399982,
"count": 1,
"self": 0.11492089701278019,
"children": {
"TrainerController._reset_env": {
"total": 5.843547811000008,
"count": 1,
"self": 5.843547811000008
},
"TrainerController.advance": {
"total": 188.84464944398724,
"count": 6309,
"self": 0.107466295982249,
"children": {
"env_step": {
"total": 118.0586339260035,
"count": 6309,
"self": 98.41974282002502,
"children": {
"SubprocessEnvManager._take_step": {
"total": 19.56722700800401,
"count": 6309,
"self": 0.40011209500471523,
"children": {
"TorchPolicy.evaluate": {
"total": 19.167114912999295,
"count": 6297,
"self": 19.167114912999295
}
}
},
"workers": {
"total": 0.07166409797446249,
"count": 6309,
"self": 0.0,
"children": {
"worker_root": {
"total": 194.55289205097688,
"count": 6309,
"is_parallel": true,
"self": 103.75594365197367,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013616999999612744,
"count": 1,
"is_parallel": true,
"self": 0.0004546000000118511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009070999999494234,
"count": 8,
"is_parallel": true,
"self": 0.0009070999999494234
}
}
},
"UnityEnvironment.step": {
"total": 0.03046879999965313,
"count": 1,
"is_parallel": true,
"self": 0.00023869999995440594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002528999998503423,
"count": 1,
"is_parallel": true,
"self": 0.0002528999998503423
},
"communicator.exchange": {
"total": 0.028661299999839684,
"count": 1,
"is_parallel": true,
"self": 0.028661299999839684
},
"steps_from_proto": {
"total": 0.0013159000000086962,
"count": 1,
"is_parallel": true,
"self": 0.00026789999992615776,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010480000000825385,
"count": 8,
"is_parallel": true,
"self": 0.0010480000000825385
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 90.79694839900321,
"count": 6308,
"is_parallel": true,
"self": 1.5050858869885815,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.172996385992974,
"count": 6308,
"is_parallel": true,
"self": 1.172996385992974
},
"communicator.exchange": {
"total": 83.50352456999826,
"count": 6308,
"is_parallel": true,
"self": 83.50352456999826
},
"steps_from_proto": {
"total": 4.615341556023395,
"count": 6308,
"is_parallel": true,
"self": 1.0762646860112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.5390768700121953,
"count": 50464,
"is_parallel": true,
"self": 3.5390768700121953
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 70.6785492220015,
"count": 6309,
"self": 0.15120609997120482,
"children": {
"process_trajectory": {
"total": 9.888434483030323,
"count": 6309,
"self": 9.888434483030323
},
"_update_policy": {
"total": 60.638908638999965,
"count": 34,
"self": 34.277911250998386,
"children": {
"TorchPPOOptimizer.update": {
"total": 26.36099738800158,
"count": 1910,
"self": 26.36099738800158
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.000000212225132e-07,
"count": 1,
"self": 6.000000212225132e-07
},
"TrainerController._save_models": {
"total": 0.12099590199977683,
"count": 1,
"self": 0.0012509999996836996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11974490200009313,
"count": 1,
"self": 0.11974490200009313
}
}
}
}
}
}
}