{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4541367292404175,
"min": 1.2375152111053467,
"max": 3.1320574283599854,
"count": 2149
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29641.123046875,
"min": 22702.34765625,
"max": 99956.203125,
"count": 2149
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.23711340206186,
"min": 39.57377049180328,
"max": 999.0,
"count": 2149
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19492.0,
"min": 4992.0,
"max": 23064.0,
"count": 2149
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1690.3184599198976,
"min": 1240.3301940147737,
"max": 1717.6282291273603,
"count": 2148
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 327921.7812244601,
"min": 4978.456025288048,
"max": 411136.9628038114,
"count": 2148
},
"SoccerTwos.Step.mean": {
"value": 25129952.0,
"min": 3649775.0,
"max": 25129952.0,
"count": 2149
},
"SoccerTwos.Step.sum": {
"value": 25129952.0,
"min": 3649775.0,
"max": 25129952.0,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03563397377729416,
"min": -0.14790469408035278,
"max": 0.21847504377365112,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.948624610900879,
"min": -26.592159271240234,
"max": 32.77125549316406,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.030216647312045097,
"min": -0.14879702031612396,
"max": 0.22972778975963593,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.892246246337891,
"min": -28.268308639526367,
"max": 34.45916748046875,
"count": 2149
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2149
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1109282041207338,
"min": -0.5572363625873219,
"max": 0.5313407436565116,
"count": 2149
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -21.63099980354309,
"min": -61.24120008945465,
"max": 59.0591995716095,
"count": 2149
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1109282041207338,
"min": -0.5572363625873219,
"max": 0.5313407436565116,
"count": 2149
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -21.63099980354309,
"min": -61.24120008945465,
"max": 59.0591995716095,
"count": 2149
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2149
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2149
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.022764509086846373,
"min": 0.009685336826563192,
"max": 0.02565293670243894,
"count": 1042
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.022764509086846373,
"min": 0.009685336826563192,
"max": 0.02565293670243894,
"count": 1042
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.12950191671649616,
"min": 0.0033425594214349986,
"max": 0.16743886222441992,
"count": 1042
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12950191671649616,
"min": 0.0033425594214349986,
"max": 0.16743886222441992,
"count": 1042
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1310691346724828,
"min": 0.003399091570948561,
"max": 0.1712032919128736,
"count": 1042
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1310691346724828,
"min": 0.003399091570948561,
"max": 0.1712032919128736,
"count": 1042
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1042
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1042
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1042
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 1042
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1042
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 1042
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701568682",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\P:\\envs\\rl\\Scripts\\mlagents-learn P:\\projects\\machine_learning_projects\\ai_vs_ai\\ml-agents\\config\\poca\\SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1701637341"
},
"total": 68658.9599409,
"count": 1,
"self": 0.21699479999369942,
"children": {
"run_training.setup": {
"total": 0.08193220000248402,
"count": 1,
"self": 0.08193220000248402
},
"TrainerController.start_learning": {
"total": 68658.6610139,
"count": 1,
"self": 35.3683730938792,
"children": {
"TrainerController._reset_env": {
"total": 5.628104999988864,
"count": 109,
"self": 5.628104999988864
},
"TrainerController.advance": {
"total": 68617.53049780613,
"count": 1495413,
"self": 31.63212630386988,
"children": {
"env_step": {
"total": 25251.367267200425,
"count": 1495413,
"self": 19903.576513396678,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5325.007258799444,
"count": 1495413,
"self": 179.27663200836832,
"children": {
"TorchPolicy.evaluate": {
"total": 5145.730626791075,
"count": 2697296,
"self": 5145.730626791075
}
}
},
"workers": {
"total": 22.783495004303404,
"count": 1495413,
"self": 0.0,
"children": {
"worker_root": {
"total": 68601.84516440403,
"count": 1495413,
"is_parallel": true,
"self": 52816.90971182069,
"children": {
"steps_from_proto": {
"total": 0.1947636000768398,
"count": 218,
"is_parallel": true,
"self": 0.03588760009006364,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.15887599998677615,
"count": 872,
"is_parallel": true,
"self": 0.15887599998677615
}
}
},
"UnityEnvironment.step": {
"total": 15784.740688983264,
"count": 1495413,
"is_parallel": true,
"self": 769.8504399854864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 642.8728582056501,
"count": 1495413,
"is_parallel": true,
"self": 642.8728582056501
},
"communicator.exchange": {
"total": 11825.80669318459,
"count": 1495413,
"is_parallel": true,
"self": 11825.80669318459
},
"steps_from_proto": {
"total": 2546.2106976075374,
"count": 2990826,
"is_parallel": true,
"self": 483.6643695834355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2062.546328024102,
"count": 11963304,
"is_parallel": true,
"self": 2062.546328024102
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 43334.531104301845,
"count": 1495413,
"self": 265.390376212883,
"children": {
"process_trajectory": {
"total": 7878.7989241888135,
"count": 1495413,
"self": 7873.1031263888435,
"children": {
"RLTrainer._checkpoint": {
"total": 5.695797799970023,
"count": 43,
"self": 5.695797799970023
}
}
},
"_update_policy": {
"total": 35190.34180390015,
"count": 1043,
"self": 3889.4235505988836,
"children": {
"TorchPOCAOptimizer.update": {
"total": 31300.918253301264,
"count": 31290,
"self": 31300.918253301264
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1999945854768157e-06,
"count": 1,
"self": 1.1999945854768157e-06
},
"TrainerController._save_models": {
"total": 0.13403679999464657,
"count": 1,
"self": 0.006410799993318506,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12762600000132807,
"count": 1,
"self": 0.12762600000132807
}
}
}
}
}
}
}