{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1804959774017334,
"min": 3.1409945487976074,
"max": 3.2956602573394775,
"count": 91
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 72159.09375,
"min": 26432.720703125,
"max": 120394.4921875,
"count": 91
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 472.4,
"max": 999.0,
"count": 91
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 16528.0,
"max": 25392.0,
"count": 91
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1190.5854951288322,
"min": 1190.5854951288322,
"max": 1201.070723271109,
"count": 72
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2381.1709902576645,
"min": 2381.1709902576645,
"max": 14412.84867925331,
"count": 72
},
"SoccerTwos.Step.mean": {
"value": 909724.0,
"min": 9716.0,
"max": 909724.0,
"count": 91
},
"SoccerTwos.Step.sum": {
"value": 909724.0,
"min": 9716.0,
"max": 909724.0,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.002927738009020686,
"min": -0.08798151463270187,
"max": 0.018709352239966393,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.02927738055586815,
"min": -1.4956780672073364,
"max": 0.2226552814245224,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0028058295138180256,
"min": -0.08797135949134827,
"max": 0.015475521795451641,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.02805829420685768,
"min": -1.4955130815505981,
"max": 0.1857062578201294,
"count": 91
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 91
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5384615384615384,
"max": 0.3431142909186227,
"count": 91
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -7.0,
"max": 4.803600072860718,
"count": 91
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5384615384615384,
"max": 0.3431142909186227,
"count": 91
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -7.0,
"max": 4.803600072860718,
"count": 91
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 91
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 91
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01436913014234354,
"min": 0.012494011122907978,
"max": 0.02099306440213695,
"count": 42
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01436913014234354,
"min": 0.012494011122907978,
"max": 0.02099306440213695,
"count": 42
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 2.395659788210954e-05,
"min": 1.8842826299684627e-05,
"max": 0.005686190662284692,
"count": 42
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 2.395659788210954e-05,
"min": 1.8842826299684627e-05,
"max": 0.005686190662284692,
"count": 42
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 2.430871487983192e-05,
"min": 1.9840847729331777e-05,
"max": 0.0053716045881931984,
"count": 42
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 2.430871487983192e-05,
"min": 1.9840847729331777e-05,
"max": 0.0053716045881931984,
"count": 42
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 42
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 42
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 42
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 42
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 42
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 42
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692041194",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/ajarmon/unit7/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692042986"
},
"total": 1792.2532448459997,
"count": 1,
"self": 0.05396196699985012,
"children": {
"run_training.setup": {
"total": 0.029793196000355238,
"count": 1,
"self": 0.029793196000355238
},
"TrainerController.start_learning": {
"total": 1792.1694896829995,
"count": 1,
"self": 0.8797533120778098,
"children": {
"TrainerController._reset_env": {
"total": 0.8026297159995011,
"count": 5,
"self": 0.8026297159995011
},
"TrainerController.advance": {
"total": 1790.2718881879218,
"count": 60047,
"self": 0.8770120679118918,
"children": {
"env_step": {
"total": 883.6539523090278,
"count": 60047,
"self": 718.0160349158878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.06100093811528,
"count": 60047,
"self": 7.0495956661479795,
"children": {
"TorchPolicy.evaluate": {
"total": 158.0114052719673,
"count": 119224,
"self": 158.0114052719673
}
}
},
"workers": {
"total": 0.5769164550247297,
"count": 60046,
"self": 0.0,
"children": {
"worker_root": {
"total": 1790.262841398986,
"count": 60046,
"is_parallel": true,
"self": 1193.1322963410512,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022962729999562725,
"count": 2,
"is_parallel": true,
"self": 0.0005607760003840667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017354969995722058,
"count": 8,
"is_parallel": true,
"self": 0.0017354969995722058
}
}
},
"UnityEnvironment.step": {
"total": 0.019591321000007156,
"count": 1,
"is_parallel": true,
"self": 0.00041642899941507494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005763119997936883,
"count": 1,
"is_parallel": true,
"self": 0.0005763119997936883
},
"communicator.exchange": {
"total": 0.017279620000408613,
"count": 1,
"is_parallel": true,
"self": 0.017279620000408613
},
"steps_from_proto": {
"total": 0.0013189600003897795,
"count": 2,
"is_parallel": true,
"self": 0.00025233100132027175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010666289990695077,
"count": 8,
"is_parallel": true,
"self": 0.0010666289990695077
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 597.124982495935,
"count": 60045,
"is_parallel": true,
"self": 35.635734673035586,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.653596939924682,
"count": 60045,
"is_parallel": true,
"self": 27.653596939924682
},
"communicator.exchange": {
"total": 426.45780201993966,
"count": 60045,
"is_parallel": true,
"self": 426.45780201993966
},
"steps_from_proto": {
"total": 107.37784886303507,
"count": 120090,
"is_parallel": true,
"self": 18.11604715721296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.26180170582211,
"count": 480360,
"is_parallel": true,
"self": 89.26180170582211
}
}
}
}
},
"steps_from_proto": {
"total": 0.0055625619997954345,
"count": 8,
"is_parallel": true,
"self": 0.001082365000002028,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004480196999793407,
"count": 32,
"is_parallel": true,
"self": 0.004480196999793407
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 905.7409238109822,
"count": 60046,
"self": 8.635388114003035,
"children": {
"process_trajectory": {
"total": 113.29608560997167,
"count": 60046,
"self": 113.15153475297211,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14455085699955816,
"count": 1,
"self": 0.14455085699955816
}
}
},
"_update_policy": {
"total": 783.8094500870075,
"count": 43,
"self": 119.34647527300513,
"children": {
"TorchPOCAOptimizer.update": {
"total": 664.4629748140023,
"count": 1290,
"self": 664.4629748140023
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0060002750833519e-06,
"count": 1,
"self": 1.0060002750833519e-06
},
"TrainerController._save_models": {
"total": 0.21521746100006567,
"count": 1,
"self": 0.0012843600006817724,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2139331009993839,
"count": 1,
"self": 0.2139331009993839
}
}
}
}
}
}
}