{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.861789882183075,
"min": 0.822840690612793,
"max": 1.5123157501220703,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 25881.2734375,
"min": 24856.37109375,
"max": 45877.609375,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479958.0,
"min": 29952.0,
"max": 479958.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479958.0,
"min": 29952.0,
"max": 479958.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0619308240711689,
"min": -0.27612510323524475,
"max": -0.02315995655953884,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.925328254699707,
"min": -65.441650390625,
"max": -5.604709625244141,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01824645884335041,
"min": 0.01824645884335041,
"max": 0.20356936752796173,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.397396564483643,
"min": 4.397396564483643,
"max": 48.245941162109375,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07199342416516484,
"min": 0.06341957829926712,
"max": 0.07231596507810235,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0079079383123077,
"min": 0.48011929747928206,
"max": 1.0079079383123077,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0017400070325559527,
"min": 0.0008931673021241397,
"max": 0.005379896306699258,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.02436009845578334,
"min": 0.006252171114868978,
"max": 0.06286599642191192,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0894150178171423e-05,
"min": 2.0894150178171423e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002925181024943999,
"min": 0.0002925181024943999,
"max": 0.0028522970492344,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10696468571428572,
"min": 0.10696468571428572,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4975056,
"min": 1.3382272,
"max": 2.2504982000000004,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000705772102857143,
"min": 0.000705772102857143,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00988080944,
"min": 0.00988080944,
"max": 0.09510148344000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01656120829284191,
"min": 0.01656120829284191,
"max": 0.2877659201622009,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23185691237449646,
"min": 0.23185691237449646,
"max": 2.0143613815307617,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 986.1515151515151,
"min": 872.1111111111111,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32543.0,
"min": 15984.0,
"max": 32543.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8048909654219946,
"min": -1.0000000521540642,
"max": -0.3170722694032722,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.56140185892582,
"min": -32.000001668930054,
"max": -11.4146016985178,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8048909654219946,
"min": -1.0000000521540642,
"max": -0.3170722694032722,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.56140185892582,
"min": -32.000001668930054,
"max": -11.4146016985178,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1705476206473329,
"min": 0.1684824346749369,
"max": 5.716537716798484,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.628071481361985,
"min": 4.88599060557317,
"max": 91.46460346877575,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1661744314",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1661745277"
},
"total": 963.33856864,
"count": 1,
"self": 0.4856121869997878,
"children": {
"run_training.setup": {
"total": 0.043232711000086965,
"count": 1,
"self": 0.043232711000086965
},
"TrainerController.start_learning": {
"total": 962.8097237420001,
"count": 1,
"self": 0.6472797960047956,
"children": {
"TrainerController._reset_env": {
"total": 10.606990282000083,
"count": 1,
"self": 10.606990282000083
},
"TrainerController.advance": {
"total": 951.4637753989953,
"count": 31556,
"self": 0.6877724950250013,
"children": {
"env_step": {
"total": 599.7054622799799,
"count": 31556,
"self": 547.5674451149644,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.787321375997635,
"count": 31556,
"self": 2.221103380982754,
"children": {
"TorchPolicy.evaluate": {
"total": 49.56621799501488,
"count": 31314,
"self": 17.128527737000695,
"children": {
"TorchPolicy.sample_actions": {
"total": 32.437690258014186,
"count": 31314,
"self": 32.437690258014186
}
}
}
}
},
"workers": {
"total": 0.350695789017891,
"count": 31556,
"self": 0.0,
"children": {
"worker_root": {
"total": 960.8051840559832,
"count": 31556,
"is_parallel": true,
"self": 463.8125984699875,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005846923000035531,
"count": 1,
"is_parallel": true,
"self": 0.004639601999997467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001207321000038064,
"count": 8,
"is_parallel": true,
"self": 0.001207321000038064
}
}
},
"UnityEnvironment.step": {
"total": 0.054551375000073676,
"count": 1,
"is_parallel": true,
"self": 0.0005636420000882936,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045906200000445097,
"count": 1,
"is_parallel": true,
"self": 0.00045906200000445097
},
"communicator.exchange": {
"total": 0.05191533400000026,
"count": 1,
"is_parallel": true,
"self": 0.05191533400000026
},
"steps_from_proto": {
"total": 0.0016133369999806746,
"count": 1,
"is_parallel": true,
"self": 0.0004396140001290405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001173722999851634,
"count": 8,
"is_parallel": true,
"self": 0.001173722999851634
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 496.9925855859957,
"count": 31555,
"is_parallel": true,
"self": 13.574127931992166,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.477071106006747,
"count": 31555,
"is_parallel": true,
"self": 11.477071106006747
},
"communicator.exchange": {
"total": 426.2884331349982,
"count": 31555,
"is_parallel": true,
"self": 426.2884331349982
},
"steps_from_proto": {
"total": 45.65295341299861,
"count": 31555,
"is_parallel": true,
"self": 11.243690411011926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.40926300198669,
"count": 252440,
"is_parallel": true,
"self": 34.40926300198669
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 351.0705406239904,
"count": 31556,
"self": 1.0582652820020257,
"children": {
"process_trajectory": {
"total": 78.03843139199194,
"count": 31556,
"self": 77.93377817699184,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1046532150000985,
"count": 1,
"self": 0.1046532150000985
}
}
},
"_update_policy": {
"total": 271.9738439499964,
"count": 208,
"self": 106.70536197901095,
"children": {
"TorchPPOOptimizer.update": {
"total": 165.26848197098548,
"count": 11412,
"self": 165.26848197098548
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.320999899689923e-06,
"count": 1,
"self": 1.320999899689923e-06
},
"TrainerController._save_models": {
"total": 0.09167694400002802,
"count": 1,
"self": 0.001547810000147365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09012913399988065,
"count": 1,
"self": 0.09012913399988065
}
}
}
}
}
}
}