{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6969166994094849,
"min": 1.6600289344787598,
"max": 3.2957608699798584,
"count": 716
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32200.69140625,
"min": 24688.013671875,
"max": 116438.890625,
"count": 716
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.17948717948718,
"min": 41.55263157894737,
"max": 999.0,
"count": 716
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19088.0,
"min": 13220.0,
"max": 29168.0,
"count": 716
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1620.4971049478559,
"min": 1196.3410541923008,
"max": 1642.0822759048187,
"count": 714
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 252797.5483718655,
"min": 2394.5245533924726,
"max": 366864.2800499807,
"count": 714
},
"SoccerTwos.Step.mean": {
"value": 7159779.0,
"min": 9600.0,
"max": 7159779.0,
"count": 716
},
"SoccerTwos.Step.sum": {
"value": 7159779.0,
"min": 9600.0,
"max": 7159779.0,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03972044959664345,
"min": -0.11577586084604263,
"max": 0.17879104614257812,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.236110687255859,
"min": -21.881637573242188,
"max": 30.8237247467041,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03798370808362961,
"min": -0.11704134196043015,
"max": 0.18601049482822418,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.963442325592041,
"min": -22.120813369750977,
"max": 30.830636978149414,
"count": 716
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 716
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.18272102182837807,
"min": -0.5435230777813838,
"max": 0.4047047637757801,
"count": 716
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -28.68720042705536,
"min": -56.80580013990402,
"max": 72.85040009021759,
"count": 716
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.18272102182837807,
"min": -0.5435230777813838,
"max": 0.4047047637757801,
"count": 716
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -28.68720042705536,
"min": -56.80580013990402,
"max": 72.85040009021759,
"count": 716
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 716
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 716
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018955567812857527,
"min": 0.011760395910823718,
"max": 0.02439175733986,
"count": 345
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018955567812857527,
"min": 0.011760395910823718,
"max": 0.02439175733986,
"count": 345
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09430747206012408,
"min": 0.001318148347005869,
"max": 0.12452329446872075,
"count": 345
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09430747206012408,
"min": 0.001318148347005869,
"max": 0.12452329446872075,
"count": 345
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09539426416158676,
"min": 0.0013465542113408446,
"max": 0.1273934985200564,
"count": 345
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09539426416158676,
"min": 0.0013465542113408446,
"max": 0.1273934985200564,
"count": 345
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 345
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 345
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 345
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 345
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 345
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 345
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691382831",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\KIIT\\anaconda3\\envs\\soccertwos\\Scripts\\mlagents-learn .\\config\\poca\\SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1691419952"
},
"total": 37120.8491212,
"count": 1,
"self": 0.30677389999618754,
"children": {
"run_training.setup": {
"total": 0.08919880000000013,
"count": 1,
"self": 0.08919880000000013
},
"TrainerController.start_learning": {
"total": 37120.453148500004,
"count": 1,
"self": 20.33770420008659,
"children": {
"TrainerController._reset_env": {
"total": 13.694396899995077,
"count": 36,
"self": 13.694396899995077
},
"TrainerController.advance": {
"total": 37086.261934099915,
"count": 492424,
"self": 22.40536360043916,
"children": {
"env_step": {
"total": 15485.984040899692,
"count": 492424,
"self": 12373.582369201213,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3100.000899998611,
"count": 492424,
"self": 117.0006025992243,
"children": {
"TorchPolicy.evaluate": {
"total": 2983.000297399387,
"count": 903100,
"self": 2983.000297399387
}
}
},
"workers": {
"total": 12.40077169986739,
"count": 492424,
"self": 0.0,
"children": {
"worker_root": {
"total": 37075.89335389997,
"count": 492424,
"is_parallel": true,
"self": 26897.2692583011,
"children": {
"steps_from_proto": {
"total": 0.10038329999326834,
"count": 72,
"is_parallel": true,
"self": 0.0196578000123786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08072549998088974,
"count": 288,
"is_parallel": true,
"self": 0.08072549998088974
}
}
},
"UnityEnvironment.step": {
"total": 10178.523712298875,
"count": 492424,
"is_parallel": true,
"self": 524.8324196028625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 389.91999889780186,
"count": 492424,
"is_parallel": true,
"self": 389.91999889780186
},
"communicator.exchange": {
"total": 7672.752460799156,
"count": 492424,
"is_parallel": true,
"self": 7672.752460799156
},
"steps_from_proto": {
"total": 1591.018832999055,
"count": 984848,
"is_parallel": true,
"self": 314.2547457979849,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1276.76408720107,
"count": 3939392,
"is_parallel": true,
"self": 1276.76408720107
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 21577.872529599783,
"count": 492423,
"self": 142.06196080096197,
"children": {
"process_trajectory": {
"total": 3511.3798764988283,
"count": 492423,
"self": 3506.947726598819,
"children": {
"RLTrainer._checkpoint": {
"total": 4.432149900009563,
"count": 14,
"self": 4.432149900009563
}
}
},
"_update_policy": {
"total": 17924.430692299993,
"count": 346,
"self": 1726.6294520000083,
"children": {
"TorchPOCAOptimizer.update": {
"total": 16197.801240299985,
"count": 10380,
"self": 16197.801240299985
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.400003384333104e-06,
"count": 1,
"self": 1.400003384333104e-06
},
"TrainerController._save_models": {
"total": 0.15911190000042552,
"count": 1,
"self": 0.007620400006999262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15149149999342626,
"count": 1,
"self": 0.15149149999342626
}
}
}
}
}
}
}