{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.205953598022461,
"min": 3.1314074993133545,
"max": 3.295724391937256,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53962.609375,
"min": 14365.044921875,
"max": 133422.21875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 710.5,
"min": 417.0833333333333,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17052.0,
"min": 14820.0,
"max": 26112.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1184.897121839943,
"min": 1184.897121839943,
"max": 1209.7504509877854,
"count": 405
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7109.382731039659,
"min": 2372.098120011844,
"max": 21708.334058858858,
"count": 405
},
"SoccerTwos.Step.mean": {
"value": 4999790.0,
"min": 9072.0,
"max": 4999790.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999790.0,
"min": 9072.0,
"max": 4999790.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008271324448287487,
"min": -0.02977818064391613,
"max": 0.014759513549506664,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.1075272187590599,
"min": -0.4168945252895355,
"max": 0.1836538314819336,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.011229801923036575,
"min": -0.02888304367661476,
"max": 0.014632614329457283,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.14598742127418518,
"min": -0.43324771523475647,
"max": 0.18205992877483368,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.14307692417731652,
"min": -0.6030769210595351,
"max": 0.38701175942140464,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.8600000143051147,
"min": -8.63479995727539,
"max": 6.579199910163879,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.14307692417731652,
"min": -0.6030769210595351,
"max": 0.38701175942140464,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.8600000143051147,
"min": -8.63479995727539,
"max": 6.579199910163879,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019428684155961187,
"min": 0.00999447045414854,
"max": 0.023810121327793848,
"count": 233
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019428684155961187,
"min": 0.00999447045414854,
"max": 0.023810121327793848,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0045237826416268945,
"min": 6.480804221572119e-07,
"max": 0.007653752823049823,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0045237826416268945,
"min": 6.480804221572119e-07,
"max": 0.007653752823049823,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.004531491803936661,
"min": 7.146187120573207e-07,
"max": 0.007737341243773699,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.004531491803936661,
"min": 7.146187120573207e-07,
"max": 0.007737341243773699,
"count": 233
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716537583",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/johnnyf/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=/Users/johnnyf/Documents/Learning/Data Science (HuggingFace)/2vs2/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1716553215"
},
"total": 15632.168753417005,
"count": 1,
"self": 0.24280958400049713,
"children": {
"run_training.setup": {
"total": 0.013176958003896289,
"count": 1,
"self": 0.013176958003896289
},
"TrainerController.start_learning": {
"total": 15631.912766875,
"count": 1,
"self": 2.3461087064206367,
"children": {
"TrainerController._reset_env": {
"total": 4.399072625979898,
"count": 25,
"self": 4.399072625979898
},
"TrainerController.advance": {
"total": 15625.069361126603,
"count": 324988,
"self": 2.1885476429160917,
"children": {
"env_step": {
"total": 12803.093972983464,
"count": 324988,
"self": 12409.598438564135,
"children": {
"SubprocessEnvManager._take_step": {
"total": 391.75373044495063,
"count": 324988,
"self": 12.512030485027935,
"children": {
"TorchPolicy.evaluate": {
"total": 379.2416999599227,
"count": 645206,
"self": 379.2416999599227
}
}
},
"workers": {
"total": 1.741803974378854,
"count": 324988,
"self": 0.0,
"children": {
"worker_root": {
"total": 15625.0689599537,
"count": 324988,
"is_parallel": true,
"self": 3587.249148508985,
"children": {
"steps_from_proto": {
"total": 0.033551461005117744,
"count": 50,
"is_parallel": true,
"self": 0.00454570104193408,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.029005759963183664,
"count": 200,
"is_parallel": true,
"self": 0.029005759963183664
}
}
},
"UnityEnvironment.step": {
"total": 12037.78625998371,
"count": 324988,
"is_parallel": true,
"self": 31.50657585017325,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 197.16775916561892,
"count": 324988,
"is_parallel": true,
"self": 197.16775916561892
},
"communicator.exchange": {
"total": 11399.821300428768,
"count": 324988,
"is_parallel": true,
"self": 11399.821300428768
},
"steps_from_proto": {
"total": 409.2906245391496,
"count": 649976,
"is_parallel": true,
"self": 45.989281766465865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 363.3013427726837,
"count": 2599904,
"is_parallel": true,
"self": 363.3013427726837
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2819.7868405002228,
"count": 324988,
"self": 26.30770409476827,
"children": {
"process_trajectory": {
"total": 419.61333831751836,
"count": 324988,
"self": 418.59344798352686,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0198903339914978,
"count": 10,
"self": 1.0198903339914978
}
}
},
"_update_policy": {
"total": 2373.865798087936,
"count": 233,
"self": 276.29951186620747,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2097.5662862217287,
"count": 6990,
"self": 2097.5662862217287
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.5797787606716156e-07,
"count": 1,
"self": 4.5797787606716156e-07
},
"TrainerController._save_models": {
"total": 0.09822395801893435,
"count": 1,
"self": 0.0010266670142300427,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09719729100470431,
"count": 1,
"self": 0.09719729100470431
}
}
}
}
}
}
}