poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.285998582839966,
"min": 3.2672150135040283,
"max": 3.2957563400268555,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 88327.640625,
"min": 27892.60546875,
"max": 117716.6015625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 417.3333333333333,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 15396.0,
"max": 26208.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1184.0528148766145,
"min": 1183.0530441429582,
"max": 1199.4660672222722,
"count": 43
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 11840.528148766145,
"min": 2370.961219735413,
"max": 16792.52494111181,
"count": 43
},
"SoccerTwos.Step.mean": {
"value": 499418.0,
"min": 9126.0,
"max": 499418.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499418.0,
"min": 9126.0,
"max": 499418.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.013640234246850014,
"min": -0.08858286589384079,
"max": -0.004238549619913101,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.13640233874320984,
"min": -1.5944916009902954,
"max": -0.04238549619913101,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.013837648555636406,
"min": -0.08846139162778854,
"max": -0.005063777323812246,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.13837648928165436,
"min": -1.5922682285308838,
"max": -0.05234326422214508,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.014880000054836274,
"min": -0.5489600010216236,
"max": 0.21036922931671143,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.14880000054836273,
"min": -8.234400015324354,
"max": 2.7347999811172485,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.014880000054836274,
"min": -0.5489600010216236,
"max": 0.21036922931671143,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.14880000054836273,
"min": -8.234400015324354,
"max": 2.7347999811172485,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02153375749476254,
"min": 0.010410184945794754,
"max": 0.024094702501315624,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02153375749476254,
"min": 0.010410184945794754,
"max": 0.024094702501315624,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0013284804299473763,
"min": 0.00024111115490086377,
"max": 0.007237169751897454,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0013284804299473763,
"min": 0.00024111115490086377,
"max": 0.007237169751897454,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0013349808636121453,
"min": 0.00024450847558910027,
"max": 0.006368812778964639,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0013349808636121453,
"min": 0.00024450847558910027,
"max": 0.006368812778964639,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731697797",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\cafer\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1731698858"
},
"total": 1061.392763200216,
"count": 1,
"self": 1.5768440002575517,
"children": {
"run_training.setup": {
"total": 0.13145879982039332,
"count": 1,
"self": 0.13145879982039332
},
"TrainerController.start_learning": {
"total": 1059.684460400138,
"count": 1,
"self": 0.9244102146476507,
"children": {
"TrainerController._reset_env": {
"total": 6.319037399720401,
"count": 3,
"self": 6.319037399720401
},
"TrainerController.advance": {
"total": 1052.2923052855767,
"count": 33175,
"self": 0.9748070519417524,
"children": {
"env_step": {
"total": 733.6119241900742,
"count": 33175,
"self": 581.3141815788113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.68048197589815,
"count": 33175,
"self": 7.193980729673058,
"children": {
"TorchPolicy.evaluate": {
"total": 144.4865012462251,
"count": 65900,
"self": 144.4865012462251
}
}
},
"workers": {
"total": 0.6172606353648007,
"count": 33175,
"self": 0.0,
"children": {
"worker_root": {
"total": 1053.0294138188474,
"count": 33175,
"is_parallel": true,
"self": 601.3654705402441,
"children": {
"steps_from_proto": {
"total": 0.007623199839144945,
"count": 6,
"is_parallel": true,
"self": 0.0014385012909770012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006184698548167944,
"count": 24,
"is_parallel": true,
"self": 0.006184698548167944
}
}
},
"UnityEnvironment.step": {
"total": 451.6563200787641,
"count": 33175,
"is_parallel": true,
"self": 23.378545485436916,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.492051992099732,
"count": 33175,
"is_parallel": true,
"self": 29.492051992099732
},
"communicator.exchange": {
"total": 326.7904810588807,
"count": 33175,
"is_parallel": true,
"self": 326.7904810588807
},
"steps_from_proto": {
"total": 71.99524154234678,
"count": 66350,
"is_parallel": true,
"self": 13.266702304594219,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.72853923775256,
"count": 265400,
"is_parallel": true,
"self": 58.72853923775256
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 317.7055740435608,
"count": 33175,
"self": 7.006266000214964,
"children": {
"process_trajectory": {
"total": 103.28810014342889,
"count": 33175,
"self": 103.04438654333353,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24371360009536147,
"count": 1,
"self": 0.24371360009536147
}
}
},
"_update_policy": {
"total": 207.41120789991692,
"count": 23,
"self": 33.33055819943547,
"children": {
"TorchPOCAOptimizer.update": {
"total": 174.08064970048144,
"count": 230,
"self": 174.08064970048144
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.000240445137024e-06,
"count": 1,
"self": 1.000240445137024e-06
},
"TrainerController._save_models": {
"total": 0.14870649995282292,
"count": 1,
"self": 0.012371799908578396,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13633470004424453,
"count": 1,
"self": 0.13633470004424453
}
}
}
}
}
}
}