{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7136650085449219,
"min": 1.5350723266601562,
"max": 3.2957305908203125,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36357.1171875,
"min": 15750.5185546875,
"max": 120054.421875,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 74.6086956521739,
"min": 38.65079365079365,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20592.0,
"min": 14828.0,
"max": 26420.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1511.0859777294277,
"min": 1193.0601290515422,
"max": 1571.806339547675,
"count": 990
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 208529.86492666102,
"min": 2387.809367873785,
"max": 374921.744832885,
"count": 990
},
"SoccerTwos.Step.mean": {
"value": 9999998.0,
"min": 9228.0,
"max": 9999998.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999998.0,
"min": 9228.0,
"max": 9999998.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.022341318428516388,
"min": -0.1456126570701599,
"max": 0.18798129260540009,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.060760498046875,
"min": -25.419660568237305,
"max": 29.456161499023438,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.020632857456803322,
"min": -0.1476450115442276,
"max": 0.19568011164665222,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.8267014026641846,
"min": -26.40540313720703,
"max": 30.500186920166016,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06458832225660338,
"min": -0.6154260894526606,
"max": 0.41083333020408946,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.848600149154663,
"min": -67.81560003757477,
"max": 68.53319978713989,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06458832225660338,
"min": -0.6154260894526606,
"max": 0.41083333020408946,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.848600149154663,
"min": -67.81560003757477,
"max": 68.53319978713989,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016120714694261552,
"min": 0.009758775869219487,
"max": 0.025247943121939898,
"count": 483
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016120714694261552,
"min": 0.009758775869219487,
"max": 0.025247943121939898,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08416846220692,
"min": 7.530191736198807e-05,
"max": 0.12263667906324069,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08416846220692,
"min": 7.530191736198807e-05,
"max": 0.12263667906324069,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08540954291820527,
"min": 7.565417496759134e-05,
"max": 0.1250862310330073,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08540954291820527,
"min": 7.565417496759134e-05,
"max": 0.1250862310330073,
"count": 483
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684591397",
"python_version": "3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0]",
"command_line_arguments": "/home/huan/Projects/hugging_face_rl_course/unit7/venv/unit7_env/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwosTest2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684605670"
},
"total": 14272.580194076,
"count": 1,
"self": 0.218427805997635,
"children": {
"run_training.setup": {
"total": 0.007288466000318294,
"count": 1,
"self": 0.007288466000318294
},
"TrainerController.start_learning": {
"total": 14272.354477804001,
"count": 1,
"self": 9.269420698634349,
"children": {
"TrainerController._reset_env": {
"total": 3.6368507330034845,
"count": 50,
"self": 3.6368507330034845
},
"TrainerController.advance": {
"total": 14259.294100940362,
"count": 688112,
"self": 8.079108767899015,
"children": {
"env_step": {
"total": 11224.007077465074,
"count": 688112,
"self": 9375.844921926126,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1842.2507868443226,
"count": 688112,
"self": 49.238166351660766,
"children": {
"TorchPolicy.evaluate": {
"total": 1793.0126204926619,
"count": 1259568,
"self": 1793.0126204926619
}
}
},
"workers": {
"total": 5.911368694625708,
"count": 688112,
"self": 0.0,
"children": {
"worker_root": {
"total": 14255.821063522044,
"count": 688112,
"is_parallel": true,
"self": 6081.803770738361,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030785440003455733,
"count": 2,
"is_parallel": true,
"self": 0.0006197150014486397,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024588289988969336,
"count": 8,
"is_parallel": true,
"self": 0.0024588289988969336
}
}
},
"UnityEnvironment.step": {
"total": 0.020829111999773886,
"count": 1,
"is_parallel": true,
"self": 0.0009763869993548724,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006373370006258483,
"count": 1,
"is_parallel": true,
"self": 0.0006373370006258483
},
"communicator.exchange": {
"total": 0.016679096999723697,
"count": 1,
"is_parallel": true,
"self": 0.016679096999723697
},
"steps_from_proto": {
"total": 0.0025362910000694683,
"count": 2,
"is_parallel": true,
"self": 0.00045332900026551215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002082961999803956,
"count": 8,
"is_parallel": true,
"self": 0.002082961999803956
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 8173.894460725674,
"count": 688111,
"is_parallel": true,
"self": 541.6470515498859,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 348.6990487199264,
"count": 688111,
"is_parallel": true,
"self": 348.6990487199264
},
"communicator.exchange": {
"total": 5800.336684562034,
"count": 688111,
"is_parallel": true,
"self": 5800.336684562034
},
"steps_from_proto": {
"total": 1483.2116758938282,
"count": 1376222,
"is_parallel": true,
"self": 241.4616300410762,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1241.750045852752,
"count": 5504888,
"is_parallel": true,
"self": 1241.750045852752
}
}
}
}
},
"steps_from_proto": {
"total": 0.12283205800849828,
"count": 98,
"is_parallel": true,
"self": 0.01995402903685317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.10287802897164511,
"count": 392,
"is_parallel": true,
"self": 0.10287802897164511
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3027.20791470739,
"count": 688112,
"self": 83.53115153940053,
"children": {
"process_trajectory": {
"total": 1079.6720547569594,
"count": 688112,
"self": 1076.5694863899635,
"children": {
"RLTrainer._checkpoint": {
"total": 3.1025683669959108,
"count": 20,
"self": 3.1025683669959108
}
}
},
"_update_policy": {
"total": 1864.0047084110302,
"count": 483,
"self": 1174.580594433116,
"children": {
"TorchPOCAOptimizer.update": {
"total": 689.4241139779142,
"count": 14490,
"self": 689.4241139779142
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.90999809699133e-07,
"count": 1,
"self": 4.90999809699133e-07
},
"TrainerController._save_models": {
"total": 0.15410494100069627,
"count": 1,
"self": 0.0011490880024211947,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15295585299827508,
"count": 1,
"self": 0.15295585299827508
}
}
}
}
}
}
}