{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2998795807361603,
"min": 0.2998795807361603,
"max": 1.4496777057647705,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8967.5986328125,
"min": 8967.5986328125,
"max": 43977.421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6211113929748535,
"min": -0.10344476997852325,
"max": 0.6849846839904785,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.3956298828125,
"min": -24.826744079589844,
"max": 195.22064208984375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01656731404364109,
"min": 0.0066413888707757,
"max": 0.2593795359134674,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.705117225646973,
"min": 1.8463060855865479,
"max": 61.47294998168945,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0680502437268539,
"min": 0.06624737396650081,
"max": 0.07324356189605101,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9527034121759548,
"min": 0.493056342613063,
"max": 1.045916427722356,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016668645421131733,
"min": 0.0006032778104597699,
"max": 0.01732480638574565,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23336103589584428,
"min": 0.007239333725517239,
"max": 0.24254728940043907,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.345468980114285e-06,
"min": 7.345468980114285e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001028365657216,
"min": 0.0001028365657216,
"max": 0.0035077301307567003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244845714285715,
"min": 0.10244845714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342784000000002,
"min": 1.3886848,
"max": 2.5692432999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002546008685714286,
"min": 0.0002546008685714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00356441216,
"min": 0.00356441216,
"max": 0.11694740567,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011610016226768494,
"min": 0.011610016226768494,
"max": 0.34201645851135254,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1625402271747589,
"min": 0.1625402271747589,
"max": 2.3941152095794678,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 300.3207547169811,
"min": 295.4183673469388,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31834.0,
"min": 15984.0,
"max": 32710.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6808056418063506,
"min": -1.0000000521540642,
"max": 1.7045816140515464,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 178.16539803147316,
"min": -30.725001737475395,
"max": 178.16539803147316,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6808056418063506,
"min": -1.0000000521540642,
"max": 1.7045816140515464,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 178.16539803147316,
"min": -30.725001737475395,
"max": 178.16539803147316,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03615061890899219,
"min": 0.03615061890899219,
"max": 7.062693669460714,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8319656043531722,
"min": 3.583511717741203,
"max": 113.00309871137142,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686002584",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686004782"
},
"total": 2197.493461491,
"count": 1,
"self": 0.47530546899997717,
"children": {
"run_training.setup": {
"total": 0.05878056699998524,
"count": 1,
"self": 0.05878056699998524
},
"TrainerController.start_learning": {
"total": 2196.959375455,
"count": 1,
"self": 1.2388562359615207,
"children": {
"TrainerController._reset_env": {
"total": 4.325028324999948,
"count": 1,
"self": 4.325028324999948
},
"TrainerController.advance": {
"total": 2191.3029616150384,
"count": 64000,
"self": 1.2815704500203537,
"children": {
"env_step": {
"total": 1555.7241794750098,
"count": 64000,
"self": 1449.964436231954,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.0128216770429,
"count": 64000,
"self": 4.608041462992787,
"children": {
"TorchPolicy.evaluate": {
"total": 100.4047802140501,
"count": 62562,
"self": 100.4047802140501
}
}
},
"workers": {
"total": 0.7469215660128157,
"count": 64000,
"self": 0.0,
"children": {
"worker_root": {
"total": 2192.311874466068,
"count": 64000,
"is_parallel": true,
"self": 850.1238659970991,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027934869999626244,
"count": 1,
"is_parallel": true,
"self": 0.0009180469999137131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018754400000489113,
"count": 8,
"is_parallel": true,
"self": 0.0018754400000489113
}
}
},
"UnityEnvironment.step": {
"total": 0.04482490999998845,
"count": 1,
"is_parallel": true,
"self": 0.0006098199999087228,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004734430001462897,
"count": 1,
"is_parallel": true,
"self": 0.0004734430001462897
},
"communicator.exchange": {
"total": 0.04205245000002833,
"count": 1,
"is_parallel": true,
"self": 0.04205245000002833
},
"steps_from_proto": {
"total": 0.001689196999905107,
"count": 1,
"is_parallel": true,
"self": 0.00032643899999129644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013627579999138106,
"count": 8,
"is_parallel": true,
"self": 0.0013627579999138106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1342.1880084689687,
"count": 63999,
"is_parallel": true,
"self": 31.031308241090528,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.3780337449557,
"count": 63999,
"is_parallel": true,
"self": 22.3780337449557
},
"communicator.exchange": {
"total": 1192.7251061700053,
"count": 63999,
"is_parallel": true,
"self": 1192.7251061700053
},
"steps_from_proto": {
"total": 96.05356031291717,
"count": 63999,
"is_parallel": true,
"self": 19.300981968859332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.75257834405784,
"count": 511992,
"is_parallel": true,
"self": 76.75257834405784
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.2972116900085,
"count": 64000,
"self": 2.46552353801917,
"children": {
"process_trajectory": {
"total": 106.53733751398977,
"count": 64000,
"self": 106.27801345098987,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2593240629998945,
"count": 2,
"self": 0.2593240629998945
}
}
},
"_update_policy": {
"total": 525.2943506379995,
"count": 450,
"self": 337.8089235699963,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.48542706800322,
"count": 22812,
"self": 187.48542706800322
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.650000952708069e-07,
"count": 1,
"self": 7.650000952708069e-07
},
"TrainerController._save_models": {
"total": 0.09252851400015061,
"count": 1,
"self": 0.0013740320000579231,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09115448200009268,
"count": 1,
"self": 0.09115448200009268
}
}
}
}
}
}
}