{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1712639331817627,
"min": 3.1573662757873535,
"max": 3.29573655128479,
"count": 76
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 99349.359375,
"min": 20475.2265625,
"max": 108897.265625,
"count": 76
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 898.0,
"min": 486.1,
"max": 999.0,
"count": 76
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21552.0,
"min": 16396.0,
"max": 25764.0,
"count": 76
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1192.5906637010683,
"min": 1186.9481478759558,
"max": 1198.0223623937807,
"count": 57
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2385.1813274021365,
"min": 2376.5993650059045,
"max": 14376.268348725367,
"count": 57
},
"SoccerTwos.Step.mean": {
"value": 759528.0,
"min": 9900.0,
"max": 759528.0,
"count": 76
},
"SoccerTwos.Step.sum": {
"value": 759528.0,
"min": 9900.0,
"max": 759528.0,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.002835378283634782,
"min": -0.04006249085068703,
"max": 0.0004909670096822083,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.03118916042149067,
"min": -0.5245926380157471,
"max": 0.005891604349017143,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.885013590363087e-07,
"min": -0.04158465564250946,
"max": 0.001044685603119433,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.573515176773071e-06,
"min": -0.5734260678291321,
"max": 0.012536226771771908,
"count": 76
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 76
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.11021817814220082,
"min": -0.625,
"max": 0.27102856976645334,
"count": 76
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.212399959564209,
"min": -10.0,
"max": 3.7943999767303467,
"count": 76
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.11021817814220082,
"min": -0.625,
"max": 0.27102856976645334,
"count": 76
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.212399959564209,
"min": -10.0,
"max": 3.7943999767303467,
"count": 76
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 76
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 76
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0171696433913894,
"min": 0.012868598999557435,
"max": 0.02073942868349453,
"count": 35
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0171696433913894,
"min": 0.012868598999557435,
"max": 0.02073942868349453,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.001642792220809497,
"min": 5.206810995635654e-06,
"max": 0.0032048087411870557,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.001642792220809497,
"min": 5.206810995635654e-06,
"max": 0.0032048087411870557,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0018860926812825105,
"min": 4.903067307774715e-06,
"max": 0.00320571749471128,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0018860926812825105,
"min": 4.903067307774715e-06,
"max": 0.00320571749471128,
"count": 35
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 35
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 35
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676557968",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\mnavas\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1676564119"
},
"total": 6150.8290296000005,
"count": 1,
"self": 0.014421700000639248,
"children": {
"run_training.setup": {
"total": 0.23115350000000046,
"count": 1,
"self": 0.23115350000000046
},
"TrainerController.start_learning": {
"total": 6150.5834544,
"count": 1,
"self": 4.1555857000385,
"children": {
"TrainerController._reset_env": {
"total": 7.383585299999494,
"count": 4,
"self": 7.383585299999494
},
"TrainerController.advance": {
"total": 6138.667258799962,
"count": 50042,
"self": 3.9021639999537,
"children": {
"env_step": {
"total": 2834.43530179998,
"count": 50042,
"self": 2199.4955918998703,
"children": {
"SubprocessEnvManager._take_step": {
"total": 632.802152400106,
"count": 50042,
"self": 22.6388087001651,
"children": {
"TorchPolicy.evaluate": {
"total": 610.1633436999409,
"count": 99410,
"self": 610.1633436999409
}
}
},
"workers": {
"total": 2.137557500003572,
"count": 50042,
"self": 0.0,
"children": {
"worker_root": {
"total": 6122.590419900036,
"count": 50042,
"is_parallel": true,
"self": 4365.501122100103,
"children": {
"steps_from_proto": {
"total": 0.022993599999321823,
"count": 8,
"is_parallel": true,
"self": 0.0050461999979285466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.017947400001393277,
"count": 32,
"is_parallel": true,
"self": 0.017947400001393277
}
}
},
"UnityEnvironment.step": {
"total": 1757.0663041999337,
"count": 50042,
"is_parallel": true,
"self": 88.60137139993276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 71.22233490001281,
"count": 50042,
"is_parallel": true,
"self": 71.22233490001281
},
"communicator.exchange": {
"total": 1303.1713972000143,
"count": 50042,
"is_parallel": true,
"self": 1303.1713972000143
},
"steps_from_proto": {
"total": 294.0712006999737,
"count": 100084,
"is_parallel": true,
"self": 59.503958499919975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 234.56724220005373,
"count": 400336,
"is_parallel": true,
"self": 234.56724220005373
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3300.3297930000285,
"count": 50042,
"self": 20.961445000039475,
"children": {
"process_trajectory": {
"total": 478.9516634999884,
"count": 50042,
"self": 478.14626179998845,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8054016999999476,
"count": 1,
"self": 0.8054016999999476
}
}
},
"_update_policy": {
"total": 2800.4166845000004,
"count": 36,
"self": 323.7601483999956,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2476.656536100005,
"count": 1056,
"self": 2476.656536100005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.2999996619764715e-06,
"count": 1,
"self": 3.2999996619764715e-06
},
"TrainerController._save_models": {
"total": 0.3770213000007061,
"count": 1,
"self": 0.00428130000091187,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37273999999979424,
"count": 1,
"self": 0.37273999999979424
}
}
}
}
}
}
}