{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.1532409191131592,
"min": 1.1196668148040771,
"max": 3.2957353591918945,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 23987.41015625,
"min": 7871.72021484375,
"max": 112492.0390625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.79746835443038,
"min": 45.00917431192661,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19844.0,
"min": 7992.0,
"max": 30788.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1709.0109510622178,
"min": 1190.391640367494,
"max": 1720.9898581771342,
"count": 991
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 270023.7302678304,
"min": 2381.3820137520165,
"max": 367918.0238814893,
"count": 991
},
"SoccerTwos.Step.mean": {
"value": 9999962.0,
"min": 9412.0,
"max": 9999962.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999962.0,
"min": 9412.0,
"max": 9999962.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.01338563859462738,
"min": -0.11655034869909286,
"max": 0.16845153272151947,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.1149308681488037,
"min": -22.494216918945312,
"max": 23.403825759887695,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.014068040996789932,
"min": -0.11802836507558823,
"max": 0.17341575026512146,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.222750425338745,
"min": -22.422487258911133,
"max": 22.71746253967285,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0220329112644437,
"min": -0.5286444425582886,
"max": 0.40512195447596105,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -3.4811999797821045,
"min": -57.29039967060089,
"max": 47.71400010585785,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0220329112644437,
"min": -0.5286444425582886,
"max": 0.40512195447596105,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -3.4811999797821045,
"min": -57.29039967060089,
"max": 47.71400010585785,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018708063105683927,
"min": 0.01079763230518438,
"max": 0.0237867681561814,
"count": 482
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018708063105683927,
"min": 0.01079763230518438,
"max": 0.0237867681561814,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10804408763845762,
"min": 0.0008606591868253115,
"max": 0.1229787160952886,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10804408763845762,
"min": 0.0008606591868253115,
"max": 0.1229787160952886,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10806638523936271,
"min": 0.0008650216366125581,
"max": 0.12314595381418864,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10806638523936271,
"min": 0.0008650216366125581,
"max": 0.12314595381418864,
"count": 482
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 7.12599857500005e-07,
"min": 7.12599857500005e-07,
"max": 0.0004986521002695799,
"count": 482
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 7.12599857500005e-07,
"min": 7.12599857500005e-07,
"max": 0.0004986521002695799,
"count": 482
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10014250000000001,
"min": 0.10014250000000001,
"max": 0.19973042,
"count": 482
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10014250000000001,
"min": 0.10014250000000001,
"max": 0.19973042,
"count": 482
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.7110750000000053e-05,
"min": 1.7110750000000053e-05,
"max": 0.004986547957999999,
"count": 482
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.7110750000000053e-05,
"min": 1.7110750000000053e-05,
"max": 0.004986547957999999,
"count": 482
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703855988",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/homebrew/Caskroom/miniconda/base/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1703866208"
},
"total": 10219.73770537498,
"count": 1,
"self": 0.19493024999974295,
"children": {
"run_training.setup": {
"total": 0.013058999989880249,
"count": 1,
"self": 0.013058999989880249
},
"TrainerController.start_learning": {
"total": 10219.529716124991,
"count": 1,
"self": 4.421711311646504,
"children": {
"TrainerController._reset_env": {
"total": 1.822008292976534,
"count": 40,
"self": 1.822008292976534
},
"TrainerController.advance": {
"total": 10213.13215393637,
"count": 681034,
"self": 3.9921176846837625,
"children": {
"env_step": {
"total": 4570.680127619009,
"count": 681034,
"self": 3851.7968664654472,
"children": {
"SubprocessEnvManager._take_step": {
"total": 715.9787979857938,
"count": 681034,
"self": 21.6306824052881,
"children": {
"TorchPolicy.evaluate": {
"total": 694.3481155805057,
"count": 1257830,
"self": 694.3481155805057
}
}
},
"workers": {
"total": 2.9044631677679718,
"count": 681034,
"self": 0.0,
"children": {
"worker_root": {
"total": 10211.220314579754,
"count": 681034,
"is_parallel": true,
"self": 6963.624994570651,
"children": {
"steps_from_proto": {
"total": 0.025804257107665762,
"count": 80,
"is_parallel": true,
"self": 0.00424826008384116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.021555997023824602,
"count": 320,
"is_parallel": true,
"self": 0.021555997023824602
}
}
},
"UnityEnvironment.step": {
"total": 3247.569515751995,
"count": 681034,
"is_parallel": true,
"self": 157.49633839310263,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.07220318031614,
"count": 681034,
"is_parallel": true,
"self": 75.07220318031614
},
"communicator.exchange": {
"total": 2617.271251162514,
"count": 681034,
"is_parallel": true,
"self": 2617.271251162514
},
"steps_from_proto": {
"total": 397.72972301606205,
"count": 1362068,
"is_parallel": true,
"self": 61.088766790606314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 336.64095622545574,
"count": 5448272,
"is_parallel": true,
"self": 336.64095622545574
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5638.459908632678,
"count": 681034,
"self": 41.62850201164838,
"children": {
"process_trajectory": {
"total": 1114.3295763299102,
"count": 681034,
"self": 1111.2247817909229,
"children": {
"RLTrainer._checkpoint": {
"total": 3.104794538987335,
"count": 20,
"self": 3.104794538987335
}
}
},
"_update_policy": {
"total": 4482.501830291119,
"count": 482,
"self": 476.7318739280745,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4005.769956363045,
"count": 14469,
"self": 4005.769956363045
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.500019036233425e-07,
"count": 1,
"self": 2.500019036233425e-07
},
"TrainerController._save_models": {
"total": 0.15384233399527147,
"count": 1,
"self": 0.0008283340139314532,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15301399998134002,
"count": 1,
"self": 0.15301399998134002
}
}
}
}
}
}
}