{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7008748054504395,
"min": 1.663567304611206,
"max": 3.215458869934082,
"count": 851
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35378.1953125,
"min": 3601.31396484375,
"max": 109269.09375,
"count": 851
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.25925925925926,
"min": 33.0,
"max": 999.0,
"count": 851
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19848.0,
"min": 132.0,
"max": 23604.0,
"count": 851
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1601.063072405119,
"min": 1186.2999109760294,
"max": 1624.7476819538674,
"count": 850
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 259372.21772962928,
"min": 2377.0248932028644,
"max": 346340.74750691757,
"count": 850
},
"SoccerTwos.Step.mean": {
"value": 9999910.0,
"min": 1499980.0,
"max": 9999910.0,
"count": 851
},
"SoccerTwos.Step.sum": {
"value": 9999910.0,
"min": 1499980.0,
"max": 9999910.0,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.038446467369794846,
"min": -0.11436130851507187,
"max": 0.16732440888881683,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.189881324768066,
"min": -21.297691345214844,
"max": 20.255615234375,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.037617385387420654,
"min": -0.11490459740161896,
"max": 0.1676502525806427,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.056398868560791,
"min": -20.465923309326172,
"max": 20.013717651367188,
"count": 851
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 851
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11372670623826685,
"min": -0.6428571428571429,
"max": 0.9652000069618225,
"count": 851
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -18.309999704360962,
"min": -54.205000162124634,
"max": 53.77600014209747,
"count": 851
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11372670623826685,
"min": -0.6428571428571429,
"max": 0.9652000069618225,
"count": 851
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -18.309999704360962,
"min": -54.205000162124634,
"max": 53.77600014209747,
"count": 851
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 851
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 851
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.008098788173325981,
"min": 0.005674449715297669,
"max": 0.018914056698607358,
"count": 410
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.008098788173325981,
"min": 0.005674449715297669,
"max": 0.018914056698607358,
"count": 410
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09481088022391002,
"min": 0.0011576284072361887,
"max": 0.12343154698610306,
"count": 410
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09481088022391002,
"min": 0.0011576284072361887,
"max": 0.12343154698610306,
"count": 410
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0953474298119545,
"min": 0.001153069622038553,
"max": 0.12456244130929311,
"count": 410
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0953474298119545,
"min": 0.001153069622038553,
"max": 0.12456244130929311,
"count": 410
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 410
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 410
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 410
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 410
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 410
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 410
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684305914",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684326360"
},
"total": 20446.436617105,
"count": 1,
"self": 6.6366969179944135,
"children": {
"run_training.setup": {
"total": 0.022039218000031724,
"count": 1,
"self": 0.022039218000031724
},
"TrainerController.start_learning": {
"total": 20439.777880969003,
"count": 1,
"self": 17.00123791754595,
"children": {
"TrainerController._reset_env": {
"total": 66.82938978198877,
"count": 32,
"self": 66.82938978198877
},
"TrainerController.advance": {
"total": 20354.402077808467,
"count": 581427,
"self": 16.553720427978988,
"children": {
"env_step": {
"total": 15114.725241913844,
"count": 581427,
"self": 11886.111546101933,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3219.7607385578494,
"count": 581427,
"self": 96.34840956989592,
"children": {
"TorchPolicy.evaluate": {
"total": 3123.4123289879535,
"count": 1067736,
"self": 3123.4123289879535
}
}
},
"workers": {
"total": 8.852957254062403,
"count": 581427,
"self": 0.0,
"children": {
"worker_root": {
"total": 20399.3307160576,
"count": 581427,
"is_parallel": true,
"self": 10384.333416753583,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004083515000047555,
"count": 2,
"is_parallel": true,
"self": 0.001060344000279656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003023170999767899,
"count": 8,
"is_parallel": true,
"self": 0.003023170999767899
}
}
},
"UnityEnvironment.step": {
"total": 0.0368012799999633,
"count": 1,
"is_parallel": true,
"self": 0.0010314519998928517,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008096920000753016,
"count": 1,
"is_parallel": true,
"self": 0.0008096920000753016
},
"communicator.exchange": {
"total": 0.03183866799997759,
"count": 1,
"is_parallel": true,
"self": 0.03183866799997759
},
"steps_from_proto": {
"total": 0.0031214680000175576,
"count": 2,
"is_parallel": true,
"self": 0.0006067409999559459,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025147270000616118,
"count": 8,
"is_parallel": true,
"self": 0.0025147270000616118
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.07466201700049169,
"count": 62,
"is_parallel": true,
"self": 0.014835434990231988,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0598265820102597,
"count": 248,
"is_parallel": true,
"self": 0.0598265820102597
}
}
},
"UnityEnvironment.step": {
"total": 10014.922637287016,
"count": 581426,
"is_parallel": true,
"self": 564.1436291081536,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 364.80537276902044,
"count": 581426,
"is_parallel": true,
"self": 364.80537276902044
},
"communicator.exchange": {
"total": 7389.2133546077885,
"count": 581426,
"is_parallel": true,
"self": 7389.2133546077885
},
"steps_from_proto": {
"total": 1696.7602808020547,
"count": 1162852,
"is_parallel": true,
"self": 324.84296026705783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1371.9173205349969,
"count": 4651408,
"is_parallel": true,
"self": 1371.9173205349969
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5223.123115466644,
"count": 581427,
"self": 115.56287661098759,
"children": {
"process_trajectory": {
"total": 1900.8606173566482,
"count": 581427,
"self": 1886.9744893836473,
"children": {
"RLTrainer._checkpoint": {
"total": 13.88612797300084,
"count": 18,
"self": 13.88612797300084
}
}
},
"_update_policy": {
"total": 3206.699621499008,
"count": 410,
"self": 1831.1733181040508,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1375.5263033949573,
"count": 6150,
"self": 1375.5263033949573
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1459997040219605e-06,
"count": 1,
"self": 1.1459997040219605e-06
},
"TrainerController._save_models": {
"total": 1.5451743150006223,
"count": 1,
"self": 0.717765588000475,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8274087270001473,
"count": 1,
"self": 0.8274087270001473
}
}
}
}
}
}
}