{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5553489923477173,
"min": 0.5553489923477173,
"max": 1.3712724447250366,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16758.2109375,
"min": 16758.2109375,
"max": 41598.921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29940.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29940.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2689127027988434,
"min": -0.0872347503900528,
"max": 0.2689127027988434,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 69.64839172363281,
"min": -20.93634033203125,
"max": 69.64839172363281,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022072983905673027,
"min": 0.006909343414008617,
"max": 0.4474110007286072,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.716902732849121,
"min": 1.7678422927856445,
"max": 106.03640747070312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06849379548276331,
"min": 0.0652461227737429,
"max": 0.07396565231805047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9589131367586864,
"min": 0.6504622445442398,
"max": 1.109484784770757,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014191582177383158,
"min": 0.0005619208560341938,
"max": 0.014191582177383158,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1986821504833642,
"min": 0.007866891984478713,
"max": 0.2094002794571376,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.24916901221429e-06,
"min": 7.24916901221429e-06,
"max": 0.0002948761350412889,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010148836617100006,
"min": 0.00010148836617100006,
"max": 0.0036347539884153983,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241635714285716,
"min": 0.10241635714285716,
"max": 0.19829204444444448,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338290000000002,
"min": 1.4338290000000002,
"max": 2.6115846,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025139407857142877,
"min": 0.00025139407857142877,
"max": 0.00982937524,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035195171000000024,
"min": 0.0035195171000000024,
"max": 0.12117730154000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012161307036876678,
"min": 0.011707554571330547,
"max": 0.4057108163833618,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1702582985162735,
"min": 0.1702582985162735,
"max": 3.651397466659546,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 522.6140350877193,
"min": 522.6140350877193,
"max": 998.1379310344828,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29789.0,
"min": 16611.0,
"max": 32920.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1614912063406224,
"min": -0.9296069489470844,
"max": 1.1614912063406224,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 66.20499876141548,
"min": -30.65360176563263,
"max": 66.20499876141548,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1614912063406224,
"min": -0.9296069489470844,
"max": 1.1614912063406224,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 66.20499876141548,
"min": -30.65360176563263,
"max": 66.20499876141548,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06586148146577711,
"min": 0.06586148146577711,
"max": 8.006500035524368,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.754104443549295,
"min": 3.6697243319358677,
"max": 136.11050060391426,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684800738",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684802931"
},
"total": 2192.906165839,
"count": 1,
"self": 0.47448447100032354,
"children": {
"run_training.setup": {
"total": 0.037876761999996233,
"count": 1,
"self": 0.037876761999996233
},
"TrainerController.start_learning": {
"total": 2192.393804606,
"count": 1,
"self": 1.498494276969268,
"children": {
"TrainerController._reset_env": {
"total": 4.415871741000046,
"count": 1,
"self": 4.415871741000046
},
"TrainerController.advance": {
"total": 2186.386978914031,
"count": 63479,
"self": 1.4853020870609726,
"children": {
"env_step": {
"total": 1539.0212503669932,
"count": 63479,
"self": 1422.5305883250312,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.62372424297928,
"count": 63479,
"self": 5.109395967995283,
"children": {
"TorchPolicy.evaluate": {
"total": 110.514328274984,
"count": 62571,
"self": 110.514328274984
}
}
},
"workers": {
"total": 0.8669377989826899,
"count": 63479,
"self": 0.0,
"children": {
"worker_root": {
"total": 2186.9798168300313,
"count": 63479,
"is_parallel": true,
"self": 885.949442773079,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018862319999470856,
"count": 1,
"is_parallel": true,
"self": 0.000584677000119882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013015549998272036,
"count": 8,
"is_parallel": true,
"self": 0.0013015549998272036
}
}
},
"UnityEnvironment.step": {
"total": 0.08947725999996692,
"count": 1,
"is_parallel": true,
"self": 0.0005890919999274047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005082959999072045,
"count": 1,
"is_parallel": true,
"self": 0.0005082959999072045
},
"communicator.exchange": {
"total": 0.08011771400003909,
"count": 1,
"is_parallel": true,
"self": 0.08011771400003909
},
"steps_from_proto": {
"total": 0.008262158000093223,
"count": 1,
"is_parallel": true,
"self": 0.0004460970003492548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007816060999743968,
"count": 8,
"is_parallel": true,
"self": 0.007816060999743968
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1301.0303740569523,
"count": 63478,
"is_parallel": true,
"self": 32.45844834696322,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.882303714988666,
"count": 63478,
"is_parallel": true,
"self": 24.882303714988666
},
"communicator.exchange": {
"total": 1137.0955325470188,
"count": 63478,
"is_parallel": true,
"self": 1137.0955325470188
},
"steps_from_proto": {
"total": 106.59408944798145,
"count": 63478,
"is_parallel": true,
"self": 22.416734550168826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.17735489781262,
"count": 507824,
"is_parallel": true,
"self": 84.17735489781262
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.8804264599767,
"count": 63479,
"self": 2.818831004911658,
"children": {
"process_trajectory": {
"total": 115.41120657106285,
"count": 63479,
"self": 115.21042983506254,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2007767360003072,
"count": 2,
"self": 0.2007767360003072
}
}
},
"_update_policy": {
"total": 527.6503888840022,
"count": 456,
"self": 341.9137686080203,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.73662027598186,
"count": 22734,
"self": 185.73662027598186
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1429997357481625e-06,
"count": 1,
"self": 1.1429997357481625e-06
},
"TrainerController._save_models": {
"total": 0.09245853099992019,
"count": 1,
"self": 0.0014060630001040408,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09105246799981614,
"count": 1,
"self": 0.09105246799981614
}
}
}
}
}
}
}