{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6733875274658203,
"min": 1.54535973072052,
"max": 3.2957680225372314,
"count": 1015
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32932.265625,
"min": 24784.818359375,
"max": 110003.34375,
"count": 1015
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 73.28358208955224,
"min": 38.944,
"max": 999.0,
"count": 1015
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 9896.0,
"max": 30272.0,
"count": 1015
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1604.4913136163316,
"min": 1197.9636370809403,
"max": 1644.5692666973036,
"count": 1010
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 215001.83602458844,
"min": 2399.954394668513,
"max": 383071.48428452644,
"count": 1010
},
"SoccerTwos.Step.mean": {
"value": 10149998.0,
"min": 9154.0,
"max": 10149998.0,
"count": 1015
},
"SoccerTwos.Step.sum": {
"value": 10149998.0,
"min": 9154.0,
"max": 10149998.0,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.033328767865896225,
"min": -0.13804522156715393,
"max": 0.16708561778068542,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.499383449554443,
"min": -27.747089385986328,
"max": 22.533878326416016,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.027855442836880684,
"min": -0.13877856731414795,
"max": 0.1622379869222641,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.7604846954345703,
"min": -27.894493103027344,
"max": 22.18732452392578,
"count": 1015
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1015
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17867555529982956,
"min": -0.7402066648006439,
"max": 0.42132572191102163,
"count": 1015
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -24.12119996547699,
"min": -61.86719971895218,
"max": 68.27119982242584,
"count": 1015
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17867555529982956,
"min": -0.7402066648006439,
"max": 0.42132572191102163,
"count": 1015
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -24.12119996547699,
"min": -61.86719971895218,
"max": 68.27119982242584,
"count": 1015
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1015
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1015
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017461659600182127,
"min": 0.010983511048834771,
"max": 0.02463465513549939,
"count": 490
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017461659600182127,
"min": 0.010983511048834771,
"max": 0.02463465513549939,
"count": 490
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10232649942239126,
"min": 0.00012617104875971562,
"max": 0.12690836166342098,
"count": 490
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10232649942239126,
"min": 0.00012617104875971562,
"max": 0.12690836166342098,
"count": 490
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10372459217905998,
"min": 0.0001274863961346758,
"max": 0.12863116388519605,
"count": 490
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10372459217905998,
"min": 0.0001274863961346758,
"max": 0.12863116388519605,
"count": 490
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 490
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 490
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 490
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 490
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 490
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 490
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697010939",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/pratiksahu/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1697038329"
},
"total": 27390.272950756997,
"count": 1,
"self": 0.12002200999631896,
"children": {
"run_training.setup": {
"total": 0.01260914500016952,
"count": 1,
"self": 0.01260914500016952
},
"TrainerController.start_learning": {
"total": 27390.140319602,
"count": 1,
"self": 16.053767128374602,
"children": {
"TrainerController._reset_env": {
"total": 2.6578026099859926,
"count": 51,
"self": 2.6578026099859926
},
"TrainerController.advance": {
"total": 27371.28318441563,
"count": 699595,
"self": 16.338328312976955,
"children": {
"env_step": {
"total": 13453.082639340362,
"count": 699595,
"self": 10998.26593173443,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2445.015888093034,
"count": 699595,
"self": 87.34362612423956,
"children": {
"TorchPolicy.evaluate": {
"total": 2357.6722619687944,
"count": 1278194,
"self": 2357.6722619687944
}
}
},
"workers": {
"total": 9.80081951289958,
"count": 699595,
"self": 0.0,
"children": {
"worker_root": {
"total": 27356.016303455872,
"count": 699595,
"is_parallel": true,
"self": 18123.262498261232,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00869886599957681,
"count": 2,
"is_parallel": true,
"self": 0.0008132769999065204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00788558899967029,
"count": 8,
"is_parallel": true,
"self": 0.00788558899967029
}
}
},
"UnityEnvironment.step": {
"total": 0.028004748999592266,
"count": 1,
"is_parallel": true,
"self": 0.0010529580004003947,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009929059997375589,
"count": 1,
"is_parallel": true,
"self": 0.0009929059997375589
},
"communicator.exchange": {
"total": 0.023819101999833947,
"count": 1,
"is_parallel": true,
"self": 0.023819101999833947
},
"steps_from_proto": {
"total": 0.0021397829996203654,
"count": 2,
"is_parallel": true,
"self": 0.0004454340005395352,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016943489990808303,
"count": 8,
"is_parallel": true,
"self": 0.0016943489990808303
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 9232.659042750625,
"count": 699594,
"is_parallel": true,
"self": 617.3098799673953,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 517.3478126689351,
"count": 699594,
"is_parallel": true,
"self": 517.3478126689351
},
"communicator.exchange": {
"total": 6598.871512075196,
"count": 699594,
"is_parallel": true,
"self": 6598.871512075196
},
"steps_from_proto": {
"total": 1499.1298380391,
"count": 1399188,
"is_parallel": true,
"self": 267.4946303377519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1231.635207701348,
"count": 5596752,
"is_parallel": true,
"self": 1231.635207701348
}
}
}
}
},
"steps_from_proto": {
"total": 0.09476244401503209,
"count": 100,
"is_parallel": true,
"self": 0.017355094018967065,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07740734999606502,
"count": 400,
"is_parallel": true,
"self": 0.07740734999606502
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13901.862216762293,
"count": 699595,
"self": 123.82960816949708,
"children": {
"process_trajectory": {
"total": 2435.25220416478,
"count": 699595,
"self": 2432.5660685357834,
"children": {
"RLTrainer._checkpoint": {
"total": 2.6861356289964533,
"count": 20,
"self": 2.6861356289964533
}
}
},
"_update_policy": {
"total": 11342.780404428015,
"count": 491,
"self": 1579.338189706019,
"children": {
"TorchPOCAOptimizer.update": {
"total": 9763.442214721996,
"count": 14717,
"self": 9763.442214721996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0120056685991585e-06,
"count": 1,
"self": 1.0120056685991585e-06
},
"TrainerController._save_models": {
"total": 0.14556443600304192,
"count": 1,
"self": 0.0013899430050514638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14417449299799046,
"count": 1,
"self": 0.14417449299799046
}
}
}
}
}
}
}