{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1578032970428467,
"min": 3.0943503379821777,
"max": 3.2957470417022705,
"count": 533
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 68814.8515625,
"min": 9006.1396484375,
"max": 133231.34375,
"count": 533
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 920.4,
"min": 409.1666666666667,
"max": 999.0,
"count": 533
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18408.0,
"min": 16344.0,
"max": 23780.0,
"count": 533
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1211.0140709225593,
"min": 1184.7310891700515,
"max": 1211.652420415645,
"count": 390
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4844.056283690237,
"min": 2370.4452805059636,
"max": 21534.521166968163,
"count": 390
},
"SoccerTwos.Step.mean": {
"value": 5329634.0,
"min": 9328.0,
"max": 5329634.0,
"count": 533
},
"SoccerTwos.Step.sum": {
"value": 5329634.0,
"min": 9328.0,
"max": 5329634.0,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00663940142840147,
"min": -0.04170246422290802,
"max": 0.005158791318535805,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0730334147810936,
"min": -0.5421320199966431,
"max": 0.05741597339510918,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006606684532016516,
"min": -0.037998493760824203,
"max": 0.004716083873063326,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07267352938652039,
"min": -0.5038904547691345,
"max": 0.05187692493200302,
"count": 533
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 533
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.16701818189837717,
"min": -0.6666666666666666,
"max": 0.36900000274181366,
"count": 533
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.8372000008821487,
"min": -10.0,
"max": 5.9040000438690186,
"count": 533
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.16701818189837717,
"min": -0.6666666666666666,
"max": 0.36900000274181366,
"count": 533
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.8372000008821487,
"min": -10.0,
"max": 5.9040000438690186,
"count": 533
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 533
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 533
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01781173248697693,
"min": 0.011581762720985959,
"max": 0.022112421380976837,
"count": 248
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01781173248697693,
"min": 0.011581762720985959,
"max": 0.022112421380976837,
"count": 248
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 6.619557328425192e-05,
"min": 1.2819827558037862e-07,
"max": 0.007151341903954745,
"count": 248
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 6.619557328425192e-05,
"min": 1.2819827558037862e-07,
"max": 0.007151341903954745,
"count": 248
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 6.617105118493783e-05,
"min": 1.288317937072255e-07,
"max": 0.007147595100104809,
"count": 248
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 6.617105118493783e-05,
"min": 1.288317937072255e-07,
"max": 0.007147595100104809,
"count": 248
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 248
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 248
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 248
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 248
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 248
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 248
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690974671",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/jh/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1690987967"
},
"total": 13295.112351631,
"count": 1,
"self": 0.5330448329987121,
"children": {
"run_training.setup": {
"total": 0.009768414000063785,
"count": 1,
"self": 0.009768414000063785
},
"TrainerController.start_learning": {
"total": 13294.569538384001,
"count": 1,
"self": 5.719322810129597,
"children": {
"TrainerController._reset_env": {
"total": 1.9886303630009934,
"count": 27,
"self": 1.9886303630009934
},
"TrainerController.advance": {
"total": 13286.64480745987,
"count": 346829,
"self": 5.669291412561506,
"children": {
"env_step": {
"total": 5226.643601603117,
"count": 346829,
"self": 4467.314352221897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 755.8678143183806,
"count": 346829,
"self": 31.81169897282598,
"children": {
"TorchPolicy.evaluate": {
"total": 724.0561153455546,
"count": 688772,
"self": 724.0561153455546
}
}
},
"workers": {
"total": 3.461435062838973,
"count": 346829,
"self": 0.0,
"children": {
"worker_root": {
"total": 13264.300721925229,
"count": 346829,
"is_parallel": true,
"self": 9421.340694249127,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019402400012040744,
"count": 2,
"is_parallel": true,
"self": 0.00047446000007767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014657800011264044,
"count": 8,
"is_parallel": true,
"self": 0.0014657800011264044
}
}
},
"UnityEnvironment.step": {
"total": 0.020414718000210996,
"count": 1,
"is_parallel": true,
"self": 0.00042586900053720456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000597819000176969,
"count": 1,
"is_parallel": true,
"self": 0.000597819000176969
},
"communicator.exchange": {
"total": 0.018284440000570612,
"count": 1,
"is_parallel": true,
"self": 0.018284440000570612
},
"steps_from_proto": {
"total": 0.0011065899989262107,
"count": 2,
"is_parallel": true,
"self": 0.0002242499995190883,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008823399994071224,
"count": 8,
"is_parallel": true,
"self": 0.0008823399994071224
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3842.9299300630946,
"count": 346828,
"is_parallel": true,
"self": 134.541852679753,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 103.81118051082103,
"count": 346828,
"is_parallel": true,
"self": 103.81118051082103
},
"communicator.exchange": {
"total": 3212.3851369052054,
"count": 346828,
"is_parallel": true,
"self": 3212.3851369052054
},
"steps_from_proto": {
"total": 392.1917599673152,
"count": 693656,
"is_parallel": true,
"self": 77.209227386169,
"children": {
"_process_rank_one_or_two_observation": {
"total": 314.9825325811462,
"count": 2774624,
"is_parallel": true,
"self": 314.9825325811462
}
}
}
}
},
"steps_from_proto": {
"total": 0.03009761300563696,
"count": 52,
"is_parallel": true,
"self": 0.005883859999812557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0242137530058244,
"count": 208,
"is_parallel": true,
"self": 0.0242137530058244
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8054.331914444192,
"count": 346829,
"self": 47.68821373466926,
"children": {
"process_trajectory": {
"total": 942.3445540715256,
"count": 346829,
"self": 940.025219979525,
"children": {
"RLTrainer._checkpoint": {
"total": 2.319334092000645,
"count": 10,
"self": 2.319334092000645
}
}
},
"_update_policy": {
"total": 7064.299146637997,
"count": 249,
"self": 502.3294806941003,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6561.969665943897,
"count": 7459,
"self": 6561.969665943897
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.599984772037715e-07,
"count": 1,
"self": 6.599984772037715e-07
},
"TrainerController._save_models": {
"total": 0.21677709100185893,
"count": 1,
"self": 0.023412786002154462,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19336430499970447,
"count": 1,
"self": 0.19336430499970447
}
}
}
}
}
}
}