{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.162252187728882,
"min": 3.162252187728882,
"max": 3.2957398891448975,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 75995.2421875,
"min": 25921.0078125,
"max": 133110.59375,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 558.6,
"min": 424.14285714285717,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22344.0,
"min": 11876.0,
"max": 28368.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1214.3936844176385,
"min": 1196.361178639922,
"max": 1214.8056999139687,
"count": 83
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 14572.724213011663,
"min": 2392.722357279844,
"max": 14572.724213011663,
"count": 83
},
"SoccerTwos.Step.mean": {
"value": 999800.0,
"min": 9200.0,
"max": 999800.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999800.0,
"min": 9200.0,
"max": 999800.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.022672370076179504,
"min": -0.06371298432350159,
"max": 0.021307898685336113,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.4534474015235901,
"min": -1.1468013525009155,
"max": 0.3217635750770569,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02146763913333416,
"min": -0.06367100030183792,
"max": 0.019453663378953934,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4293527901172638,
"min": -1.1460400819778442,
"max": 0.3416339159011841,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.14093999862670897,
"min": -0.47295999924341836,
"max": 0.3928250018507242,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.8187999725341797,
"min": -7.094399988651276,
"max": 6.2852000296115875,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.14093999862670897,
"min": -0.47295999924341836,
"max": 0.3928250018507242,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.8187999725341797,
"min": -7.094399988651276,
"max": 6.2852000296115875,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012548627756041241,
"min": 0.01249065709222729,
"max": 0.022648589235420027,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012548627756041241,
"min": 0.01249065709222729,
"max": 0.022648589235420027,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.006773155058423678,
"min": 1.445286061425577e-05,
"max": 0.006773155058423678,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.006773155058423678,
"min": 1.445286061425577e-05,
"max": 0.006773155058423678,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.006792481305698554,
"min": 1.4504865278771225e-05,
"max": 0.006792481305698554,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.006792481305698554,
"min": 1.4504865278771225e-05,
"max": 0.006792481305698554,
"count": 46
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686133232",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\foong\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1686137348"
},
"total": 4116.6963604,
"count": 1,
"self": 0.42616520000046876,
"children": {
"run_training.setup": {
"total": 0.1006718000000002,
"count": 1,
"self": 0.1006718000000002
},
"TrainerController.start_learning": {
"total": 4116.1695234,
"count": 1,
"self": 2.2734087000380896,
"children": {
"TrainerController._reset_env": {
"total": 3.4690902999998645,
"count": 5,
"self": 3.4690902999998645
},
"TrainerController.advance": {
"total": 4110.225137399961,
"count": 65163,
"self": 2.5183218999527526,
"children": {
"env_step": {
"total": 1819.2281803000146,
"count": 65163,
"self": 1420.6205691000682,
"children": {
"SubprocessEnvManager._take_step": {
"total": 397.1312740999462,
"count": 65163,
"self": 13.21819270006688,
"children": {
"TorchPolicy.evaluate": {
"total": 383.91308139987933,
"count": 129386,
"self": 383.91308139987933
}
}
},
"workers": {
"total": 1.4763371000001149,
"count": 65163,
"self": 0.0,
"children": {
"worker_root": {
"total": 4108.652217799923,
"count": 65163,
"is_parallel": true,
"self": 2966.676160199945,
"children": {
"steps_from_proto": {
"total": 0.014683500000186367,
"count": 10,
"is_parallel": true,
"self": 0.0026850000003277863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01199849999985858,
"count": 40,
"is_parallel": true,
"self": 0.01199849999985858
}
}
},
"UnityEnvironment.step": {
"total": 1141.9613740999775,
"count": 65163,
"is_parallel": true,
"self": 64.81871039992097,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 52.98523710003018,
"count": 65163,
"is_parallel": true,
"self": 52.98523710003018
},
"communicator.exchange": {
"total": 812.326884199954,
"count": 65163,
"is_parallel": true,
"self": 812.326884199954
},
"steps_from_proto": {
"total": 211.83054240007243,
"count": 130326,
"is_parallel": true,
"self": 40.816247799867625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 171.0142946002048,
"count": 521304,
"is_parallel": true,
"self": 171.0142946002048
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2288.4786351999933,
"count": 65163,
"self": 16.39999309998484,
"children": {
"process_trajectory": {
"total": 320.41879030000734,
"count": 65163,
"self": 319.8852327000071,
"children": {
"RLTrainer._checkpoint": {
"total": 0.533557600000222,
"count": 2,
"self": 0.533557600000222
}
}
},
"_update_policy": {
"total": 1951.6598518000012,
"count": 46,
"self": 203.0268397999971,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1748.633012000004,
"count": 1380,
"self": 1748.633012000004
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4000006558489986e-06,
"count": 1,
"self": 1.4000006558489986e-06
},
"TrainerController._save_models": {
"total": 0.20188560000042344,
"count": 1,
"self": 0.005417700000180048,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1964679000002434,
"count": 1,
"self": 0.1964679000002434
}
}
}
}
}
}
}