poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0066587924957275,
"min": 1.966582179069519,
"max": 3.2956998348236084,
"count": 4998
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41032.16015625,
"min": 1581.9359130859375,
"max": 418684.59375,
"count": 4998
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.35294117647059,
"min": 34.41843971631206,
"max": 999.0,
"count": 4998
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19500.0,
"min": 14620.0,
"max": 39960.0,
"count": 4998
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1640.6328847050133,
"min": 1153.7693589206315,
"max": 1674.10436036523,
"count": 4834
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 278907.59039985226,
"min": 2313.576287843596,
"max": 442666.95510906423,
"count": 4834
},
"SoccerTwos.Step.mean": {
"value": 49999798.0,
"min": 9592.0,
"max": 49999798.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999798.0,
"min": 9592.0,
"max": 49999798.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0017385132377967238,
"min": -0.12633009254932404,
"max": 0.1722392737865448,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.2955472469329834,
"min": -26.76581382751465,
"max": 29.213077545166016,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0020061323884874582,
"min": -0.13189859688282013,
"max": 0.17249120771884918,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.34104251861572266,
"min": -27.197973251342773,
"max": 29.7274169921875,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07054117988137638,
"min": -0.75,
"max": 0.44046000242233274,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.992000579833984,
"min": -66.54319989681244,
"max": 55.15440022945404,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07054117988137638,
"min": -0.75,
"max": 0.44046000242233274,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.992000579833984,
"min": -66.54319989681244,
"max": 55.15440022945404,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0178281808513763,
"min": 0.01644083473639815,
"max": 0.019236101510972124,
"count": 121
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0178281808513763,
"min": 0.01644083473639815,
"max": 0.019236101510972124,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09072129296759765,
"min": 0.0013434893971134444,
"max": 0.10200655835370223,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09072129296759765,
"min": 0.0013434893971134444,
"max": 0.10200655835370223,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09396490924060345,
"min": 0.0014256863485691913,
"max": 0.10843392221877972,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09396490924060345,
"min": 0.0014256863485691913,
"max": 0.10843392221877972,
"count": 121
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 121
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 121
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 121
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 121
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686976337",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/home/byron/miniconda3/envs/ml-agents/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos2 --no-graphics --force --num-envs 4 --num-areas 24",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1687065230"
},
"total": 88890.95312685799,
"count": 1,
"self": 10.005198557977565,
"children": {
"run_training.setup": {
"total": 0.02026322900201194,
"count": 1,
"self": 0.02026322900201194
},
"TrainerController.start_learning": {
"total": 88880.92766507101,
"count": 1,
"self": 51.12182409907109,
"children": {
"TrainerController._reset_env": {
"total": 28.78135965191177,
"count": 250,
"self": 28.78135965191177
},
"TrainerController.advance": {
"total": 88800.89237597602,
"count": 2897773,
"self": 46.176983293145895,
"children": {
"env_step": {
"total": 16112.32562589184,
"count": 2897773,
"self": 8337.92734770324,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7744.525256735971,
"count": 3537101,
"self": 358.2366272135696,
"children": {
"TorchPolicy.evaluate": {
"total": 7386.288629522402,
"count": 6553064,
"self": 7386.288629522402
}
}
},
"workers": {
"total": 29.873021452629473,
"count": 2897773,
"self": 0.0,
"children": {
"worker_root": {
"total": 355329.01992389985,
"count": 3536497,
"is_parallel": true,
"self": 312393.60162804194,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006368501999531873,
"count": 8,
"is_parallel": true,
"self": 0.0016485920059494674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004719909993582405,
"count": 32,
"is_parallel": true,
"self": 0.004719909993582405
}
}
},
"UnityEnvironment.step": {
"total": 0.09952747898933012,
"count": 4,
"is_parallel": true,
"self": 0.0018694659665925428,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0011861009988933802,
"count": 4,
"is_parallel": true,
"self": 0.0011861009988933802
},
"communicator.exchange": {
"total": 0.09098909799649846,
"count": 4,
"is_parallel": true,
"self": 0.09098909799649846
},
"steps_from_proto": {
"total": 0.005482814027345739,
"count": 8,
"is_parallel": true,
"self": 0.0012111670221202075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004271647005225532,
"count": 32,
"is_parallel": true,
"self": 0.004271647005225532
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 42934.21940120563,
"count": 3536493,
"is_parallel": true,
"self": 1466.2873808125732,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 940.9898658304737,
"count": 3536493,
"is_parallel": true,
"self": 940.9898658304737
},
"communicator.exchange": {
"total": 36072.97828729372,
"count": 3536493,
"is_parallel": true,
"self": 36072.97828729372
},
"steps_from_proto": {
"total": 4453.96386726886,
"count": 7072986,
"is_parallel": true,
"self": 871.4131258842244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3582.5507413846353,
"count": 28291944,
"is_parallel": true,
"self": 3582.5507413846353
}
}
}
}
},
"steps_from_proto": {
"total": 1.1988946522615151,
"count": 1992,
"is_parallel": true,
"self": 0.23140063536993694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.9674940168915782,
"count": 7968,
"is_parallel": true,
"self": 0.9674940168915782
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 72642.38976679104,
"count": 2897773,
"self": 509.61341205817007,
"children": {
"process_trajectory": {
"total": 11321.511744074814,
"count": 2897773,
"self": 11309.370236086936,
"children": {
"RLTrainer._checkpoint": {
"total": 12.141507987878867,
"count": 100,
"self": 12.141507987878867
}
}
},
"_update_policy": {
"total": 60811.26461065805,
"count": 121,
"self": 5908.727442015937,
"children": {
"TorchPOCAOptimizer.update": {
"total": 54902.53716864211,
"count": 72600,
"self": 54902.53716864211
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.899951353669167e-07,
"count": 1,
"self": 8.899951353669167e-07
},
"TrainerController._save_models": {
"total": 0.1321044540090952,
"count": 1,
"self": 0.008838533016387373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12326592099270783,
"count": 1,
"self": 0.12326592099270783
}
}
}
}
}
}
}