poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5422695875167847,
"min": 1.358902931213379,
"max": 1.6392451524734497,
"count": 131
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30203.806640625,
"min": 869.6978759765625,
"max": 35354.890625,
"count": 131
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.8875,
"min": 14.0,
"max": 72.31884057971014,
"count": 131
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19804.0,
"min": 112.0,
"max": 20420.0,
"count": 131
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1555.0830582808662,
"min": 1503.1061852957882,
"max": 1569.937910488838,
"count": 131
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 248813.2893249386,
"min": 6140.598878565317,
"max": 341490.80077715684,
"count": 131
},
"SoccerTwos.Step.mean": {
"value": 9999986.0,
"min": 8699987.0,
"max": 9999986.0,
"count": 131
},
"SoccerTwos.Step.sum": {
"value": 9999986.0,
"min": 8699987.0,
"max": 9999986.0,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.039965007454156876,
"min": -0.07955111563205719,
"max": 0.07412034273147583,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.354435920715332,
"min": -15.674731254577637,
"max": 12.600458145141602,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04035235196352005,
"min": -0.08584316074848175,
"max": 0.08051864057779312,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.4160237312316895,
"min": -14.986276626586914,
"max": 13.688169479370117,
"count": 131
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 131
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03440125670822911,
"min": -0.2702774581192546,
"max": 0.3316309686630003,
"count": 131
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.469799816608429,
"min": -55.96039962768555,
"max": 51.402800142765045,
"count": 131
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03440125670822911,
"min": -0.2702774581192546,
"max": 0.3316309686630003,
"count": 131
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.469799816608429,
"min": -55.96039962768555,
"max": 51.402800142765045,
"count": 131
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 131
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 131
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015145652223751919,
"min": 0.011791978035277376,
"max": 0.023321755667954372,
"count": 63
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015145652223751919,
"min": 0.011791978035277376,
"max": 0.023321755667954372,
"count": 63
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10312746713558833,
"min": 0.09516235515475273,
"max": 0.11794356231888135,
"count": 63
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10312746713558833,
"min": 0.09516235515475273,
"max": 0.11794356231888135,
"count": 63
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10443195899327597,
"min": 0.09606056759754816,
"max": 0.11946648806333542,
"count": 63
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10443195899327597,
"min": 0.09606056759754816,
"max": 0.11946648806333542,
"count": 63
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 63
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 63
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 63
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 63
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 63
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 63
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692207371",
"python_version": "3.9.17 (main, Jul 5 2023, 16:17:03) \n[Clang 14.0.6 ]",
"command_line_arguments": "/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1692213584"
},
"total": 5904.371516334,
"count": 1,
"self": 2.3665285830002176,
"children": {
"run_training.setup": {
"total": 0.03477250000000254,
"count": 1,
"self": 0.03477250000000254
},
"TrainerController.start_learning": {
"total": 5901.970215251,
"count": 1,
"self": 1.335926395909155,
"children": {
"TrainerController._reset_env": {
"total": 6.9797415419994415,
"count": 8,
"self": 6.9797415419994415
},
"TrainerController.advance": {
"total": 5893.388342604091,
"count": 90501,
"self": 1.1450277041085428,
"children": {
"env_step": {
"total": 1214.93956218408,
"count": 90501,
"self": 980.8131489869226,
"children": {
"SubprocessEnvManager._take_step": {
"total": 233.3234402870467,
"count": 90501,
"self": 5.983297622016522,
"children": {
"TorchPolicy.evaluate": {
"total": 227.34014266503019,
"count": 163138,
"self": 227.34014266503019
}
}
},
"workers": {
"total": 0.802972910110654,
"count": 90501,
"self": 0.0,
"children": {
"worker_root": {
"total": 5892.433743614063,
"count": 90501,
"is_parallel": true,
"self": 5054.01503899014,
"children": {
"steps_from_proto": {
"total": 0.012321166999555544,
"count": 16,
"is_parallel": true,
"self": 0.00219025000286166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.010130916996693884,
"count": 64,
"is_parallel": true,
"self": 0.010130916996693884
}
}
},
"UnityEnvironment.step": {
"total": 838.4063834569233,
"count": 90501,
"is_parallel": true,
"self": 45.301781421703595,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.50081490400531,
"count": 90501,
"is_parallel": true,
"self": 26.50081490400531
},
"communicator.exchange": {
"total": 647.0776638040975,
"count": 90501,
"is_parallel": true,
"self": 647.0776638040975
},
"steps_from_proto": {
"total": 119.52612332711689,
"count": 181002,
"is_parallel": true,
"self": 20.076650796359928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.44947253075696,
"count": 724008,
"is_parallel": true,
"self": 99.44947253075696
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4677.303752715903,
"count": 90501,
"self": 11.050956303944986,
"children": {
"process_trajectory": {
"total": 590.7645311589569,
"count": 90501,
"self": 589.4128263679576,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3517047909992925,
"count": 3,
"self": 1.3517047909992925
}
}
},
"_update_policy": {
"total": 4075.4882652530014,
"count": 63,
"self": 165.5546123100121,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3909.9336529429893,
"count": 1890,
"self": 3909.9336529429893
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.840001904289238e-07,
"count": 1,
"self": 5.840001904289238e-07
},
"TrainerController._save_models": {
"total": 0.26620412499960366,
"count": 1,
"self": 0.01405583299947466,
"children": {
"RLTrainer._checkpoint": {
"total": 0.252148292000129,
"count": 1,
"self": 0.252148292000129
}
}
}
}
}
}
}