{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.6622462272644043,
"min": 2.6489522457122803,
"max": 3.2560842037200928,
"count": 232
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 54863.5703125,
"min": 26214.986328125,
"max": 117321.2890625,
"count": 232
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.3,
"min": 52.92391304347826,
"max": 999.0,
"count": 232
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19616.0,
"min": 3996.0,
"max": 28732.0,
"count": 232
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1373.0927017474537,
"min": 1194.1977031132333,
"max": 1393.6827430538624,
"count": 226
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 219694.83227959258,
"min": 2388.7592550719364,
"max": 255413.63776337786,
"count": 226
},
"SoccerTwos.Step.mean": {
"value": 2439985.0,
"min": 129784.0,
"max": 2439985.0,
"count": 232
},
"SoccerTwos.Step.sum": {
"value": 2439985.0,
"min": 129784.0,
"max": 2439985.0,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.030760126188397408,
"min": -0.0555557906627655,
"max": 0.1942271590232849,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.952380180358887,
"min": -9.283685684204102,
"max": 30.101749420166016,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.030960917472839355,
"min": -0.06245824322104454,
"max": 0.1976386159658432,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.984707832336426,
"min": -10.742817878723145,
"max": 31.016311645507812,
"count": 232
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 232
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.037162734114605446,
"min": -0.75,
"max": 0.42153939243518945,
"count": 232
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.983200192451477,
"min": -47.031000316143036,
"max": 40.38080012798309,
"count": 232
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.037162734114605446,
"min": -0.75,
"max": 0.42153939243518945,
"count": 232
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.983200192451477,
"min": -47.031000316143036,
"max": 40.38080012798309,
"count": 232
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 232
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 232
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015639586412968736,
"min": 0.012182302382037354,
"max": 0.024361702194437385,
"count": 109
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015639586412968736,
"min": 0.012182302382037354,
"max": 0.024361702194437385,
"count": 109
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07829210460186005,
"min": 8.909819055891906e-05,
"max": 0.08327072511116664,
"count": 109
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07829210460186005,
"min": 8.909819055891906e-05,
"max": 0.08327072511116664,
"count": 109
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0807863230506579,
"min": 8.961415805970318e-05,
"max": 0.08557597572604815,
"count": 109
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0807863230506579,
"min": 8.961415805970318e-05,
"max": 0.08557597572604815,
"count": 109
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 109
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 109
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 109
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 109
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 109
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 109
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1732556672",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\adick\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1732561845"
},
"total": 5172.644456099952,
"count": 1,
"self": 0.56409459997667,
"children": {
"run_training.setup": {
"total": 0.08655519998865202,
"count": 1,
"self": 0.08655519998865202
},
"TrainerController.start_learning": {
"total": 5171.993806299986,
"count": 1,
"self": 3.1722849840298295,
"children": {
"TrainerController._reset_env": {
"total": 4.7166213999153115,
"count": 13,
"self": 4.7166213999153115
},
"TrainerController.advance": {
"total": 5163.979271216085,
"count": 153084,
"self": 3.0012293276377022,
"children": {
"env_step": {
"total": 2093.4836410930147,
"count": 153084,
"self": 1606.2386694894521,
"children": {
"SubprocessEnvManager._take_step": {
"total": 485.21356289525284,
"count": 153084,
"self": 16.603748686728068,
"children": {
"TorchPolicy.evaluate": {
"total": 468.60981420852477,
"count": 295424,
"self": 468.60981420852477
}
}
},
"workers": {
"total": 2.0314087083097547,
"count": 153084,
"self": 0.0,
"children": {
"worker_root": {
"total": 5164.992609376612,
"count": 153084,
"is_parallel": true,
"self": 3932.269313370809,
"children": {
"steps_from_proto": {
"total": 0.019773000210989267,
"count": 26,
"is_parallel": true,
"self": 0.0038857003673911095,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.015887299843598157,
"count": 104,
"is_parallel": true,
"self": 0.015887299843598157
}
}
},
"UnityEnvironment.step": {
"total": 1232.7035230055917,
"count": 153084,
"is_parallel": true,
"self": 71.5986019121483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 65.53120428865077,
"count": 153084,
"is_parallel": true,
"self": 65.53120428865077
},
"communicator.exchange": {
"total": 878.8899658955634,
"count": 153084,
"is_parallel": true,
"self": 878.8899658955634
},
"steps_from_proto": {
"total": 216.68375090922927,
"count": 306168,
"is_parallel": true,
"self": 42.78485327609815,
"children": {
"_process_rank_one_or_two_observation": {
"total": 173.89889763313113,
"count": 1224672,
"is_parallel": true,
"self": 173.89889763313113
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3067.4944007954327,
"count": 153083,
"self": 24.25711219961522,
"children": {
"process_trajectory": {
"total": 391.4392179962015,
"count": 153083,
"self": 391.017623696127,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4215943000745028,
"count": 4,
"self": 0.4215943000745028
}
}
},
"_update_policy": {
"total": 2651.798070599616,
"count": 109,
"self": 301.455592199869,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2350.342478399747,
"count": 3285,
"self": 2350.342478399747
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0999577827751637e-06,
"count": 1,
"self": 2.0999577827751637e-06
},
"TrainerController._save_models": {
"total": 0.12562659999821335,
"count": 1,
"self": 0.009685400000307709,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11594119999790564,
"count": 1,
"self": 0.11594119999790564
}
}
}
}
}
}
}