{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1753852367401123,
"min": 3.124070167541504,
"max": 3.295760154724121,
"count": 200
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 62186.7421875,
"min": 6395.45703125,
"max": 125993.9765625,
"count": 200
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 376.0,
"max": 999.0,
"count": 200
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 7096.0,
"max": 27784.0,
"count": 200
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1210.76825350818,
"min": 1198.995732238514,
"max": 1217.7148831130346,
"count": 160
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9686.14602806544,
"min": 2403.982894094307,
"max": 16937.35401906041,
"count": 160
},
"SoccerTwos.Step.mean": {
"value": 1999810.0,
"min": 9908.0,
"max": 1999810.0,
"count": 200
},
"SoccerTwos.Step.sum": {
"value": 1999810.0,
"min": 9908.0,
"max": 1999810.0,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.005106488708406687,
"min": -0.04636257141828537,
"max": 0.009470287710428238,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.05106488615274429,
"min": -0.7037259936332703,
"max": 0.11364345252513885,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.004664062522351742,
"min": -0.04432212561368942,
"max": 0.00714792450889945,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.04664062336087227,
"min": -0.7069289684295654,
"max": 0.08577509224414825,
"count": 200
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6153846153846154,
"max": 0.2777714303561619,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 3.888800024986267,
"count": 200
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6153846153846154,
"max": 0.2777714303561619,
"count": 200
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 3.888800024986267,
"count": 200
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016594474228137795,
"min": 0.012508376075614554,
"max": 0.023593630608714496,
"count": 93
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016594474228137795,
"min": 0.012508376075614554,
"max": 0.023593630608714496,
"count": 93
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003200472751632333,
"min": 2.550808407401443e-06,
"max": 0.004651585803367197,
"count": 93
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003200472751632333,
"min": 2.550808407401443e-06,
"max": 0.004651585803367197,
"count": 93
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003201262870182594,
"min": 2.5332763167777253e-06,
"max": 0.004683688228639463,
"count": 93
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003201262870182594,
"min": 2.5332763167777253e-06,
"max": 0.004683688228639463,
"count": 93
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 93
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 93
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 93
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 93
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 93
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 93
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733390944",
"python_version": "3.10.1 | packaged by conda-forge | (main, Dec 22 2021, 01:39:07) [Clang 11.1.0 ]",
"command_line_arguments": "/opt/anaconda3/envs/hf-rl-course/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos-2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1733397444"
},
"total": 6500.227486415999,
"count": 1,
"self": 0.17503141600172967,
"children": {
"run_training.setup": {
"total": 0.014256874972488731,
"count": 1,
"self": 0.014256874972488731
},
"TrainerController.start_learning": {
"total": 6500.038198125025,
"count": 1,
"self": 1.1946516065509059,
"children": {
"TrainerController._reset_env": {
"total": 2.4832318329135887,
"count": 10,
"self": 2.4832318329135887
},
"TrainerController.advance": {
"total": 6496.281524476595,
"count": 130538,
"self": 1.1281268410966732,
"children": {
"env_step": {
"total": 5425.526924707112,
"count": 130538,
"self": 5244.52196714154,
"children": {
"SubprocessEnvManager._take_step": {
"total": 180.20163135998882,
"count": 130538,
"self": 5.278902637772262,
"children": {
"TorchPolicy.evaluate": {
"total": 174.92272872221656,
"count": 259126,
"self": 174.92272872221656
}
}
},
"workers": {
"total": 0.8033262055832893,
"count": 130538,
"self": 0.0,
"children": {
"worker_root": {
"total": 6495.968970293063,
"count": 130538,
"is_parallel": true,
"self": 1445.0736605353886,
"children": {
"steps_from_proto": {
"total": 0.013292915828060359,
"count": 20,
"is_parallel": true,
"self": 0.0017947120359167457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.011498203792143613,
"count": 80,
"is_parallel": true,
"self": 0.011498203792143613
}
}
},
"UnityEnvironment.step": {
"total": 5050.882016841846,
"count": 130538,
"is_parallel": true,
"self": 12.226883286319207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.68823079677531,
"count": 130538,
"is_parallel": true,
"self": 86.68823079677531
},
"communicator.exchange": {
"total": 4785.622075497755,
"count": 130538,
"is_parallel": true,
"self": 4785.622075497755
},
"steps_from_proto": {
"total": 166.34482726099668,
"count": 261076,
"is_parallel": true,
"self": 19.653808775998186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 146.6910184849985,
"count": 1044304,
"is_parallel": true,
"self": 146.6910184849985
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1069.6264729283866,
"count": 130538,
"self": 11.428957007476129,
"children": {
"process_trajectory": {
"total": 170.9214817569591,
"count": 130538,
"self": 170.5529237979208,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3685579590382986,
"count": 4,
"self": 0.3685579590382986
}
}
},
"_update_policy": {
"total": 887.2760341639514,
"count": 93,
"self": 117.34854670194909,
"children": {
"TorchPOCAOptimizer.update": {
"total": 769.9274874620023,
"count": 2799,
"self": 769.9274874620023
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.33995558321476e-07,
"count": 1,
"self": 3.33995558321476e-07
},
"TrainerController._save_models": {
"total": 0.07878987496951595,
"count": 1,
"self": 0.0005459159729070961,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07824395899660885,
"count": 1,
"self": 0.07824395899660885
}
}
}
}
}
}
}