{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.252148151397705,
"min": 3.2340750694274902,
"max": 3.2956032752990723,
"count": 22
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 62233.10546875,
"min": 27394.943359375,
"max": 110437.109375,
"count": 22
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 841.8333333333334,
"min": 438.1818181818182,
"max": 999.0,
"count": 22
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20204.0,
"min": 16556.0,
"max": 24620.0,
"count": 22
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.712235612661,
"min": 1198.096716181842,
"max": 1204.96588437155,
"count": 19
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4810.848942450644,
"min": 2397.5438868584306,
"max": 12030.489174378994,
"count": 19
},
"SoccerTwos.Step.mean": {
"value": 219448.0,
"min": 9328.0,
"max": 219448.0,
"count": 22
},
"SoccerTwos.Step.sum": {
"value": 219448.0,
"min": 9328.0,
"max": 219448.0,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.021987518295645714,
"min": -0.03358123451471329,
"max": -0.00914370734244585,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.26385021209716797,
"min": -0.3737238049507141,
"max": -0.1097244918346405,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.025403909385204315,
"min": -0.035054437816143036,
"max": -0.009414930827915668,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.3048469126224518,
"min": -0.48784586787223816,
"max": -0.10248959809541702,
"count": 22
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 22
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.3333333333333333,
"min": -0.521013331413269,
"max": 0.1428000032901764,
"count": 22
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.0,
"min": -7.815199971199036,
"max": 1.7136000394821167,
"count": 22
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.3333333333333333,
"min": -0.521013331413269,
"max": 0.1428000032901764,
"count": 22
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.0,
"min": -7.815199971199036,
"max": 1.7136000394821167,
"count": 22
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019089772222408406,
"min": 0.014227441156981513,
"max": 0.019250484396858763,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019089772222408406,
"min": 0.014227441156981513,
"max": 0.019250484396858763,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00303266728684927,
"min": 3.1557437978335656e-05,
"max": 0.005675278091803193,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.00303266728684927,
"min": 3.1557437978335656e-05,
"max": 0.005675278091803193,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003077593092651417,
"min": 3.051361230366941e-05,
"max": 0.005773659151357909,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003077593092651417,
"min": 3.051361230366941e-05,
"max": 0.005773659151357909,
"count": 10
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692630068",
"python_version": "3.9.17 (main, Jul 5 2023, 15:35:09) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/maxph2211/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1692630365"
},
"total": 296.670284458,
"count": 1,
"self": 0.15351054100000283,
"children": {
"run_training.setup": {
"total": 0.013190542000000027,
"count": 1,
"self": 0.013190542000000027
},
"TrainerController.start_learning": {
"total": 296.503583375,
"count": 1,
"self": 0.10988081000192551,
"children": {
"TrainerController._reset_env": {
"total": 1.6036085840000207,
"count": 2,
"self": 1.6036085840000207
},
"TrainerController.advance": {
"total": 294.5489405649981,
"count": 15271,
"self": 0.10133990499599577,
"children": {
"env_step": {
"total": 142.389375107002,
"count": 15271,
"self": 122.13004312600218,
"children": {
"SubprocessEnvManager._take_step": {
"total": 20.162998269997207,
"count": 15271,
"self": 0.5817577979946584,
"children": {
"TorchPolicy.evaluate": {
"total": 19.58124047200255,
"count": 30328,
"self": 19.58124047200255
}
}
},
"workers": {
"total": 0.09633371100261234,
"count": 15270,
"self": 0.0,
"children": {
"worker_root": {
"total": 294.5319253670018,
"count": 15270,
"is_parallel": true,
"self": 190.35275932700335,
"children": {
"steps_from_proto": {
"total": 0.001865914999996665,
"count": 4,
"is_parallel": true,
"self": 0.00035421000005775305,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001511704999938912,
"count": 16,
"is_parallel": true,
"self": 0.001511704999938912
}
}
},
"UnityEnvironment.step": {
"total": 104.17730012499847,
"count": 15270,
"is_parallel": true,
"self": 6.711494018002739,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.179639326997007,
"count": 15270,
"is_parallel": true,
"self": 3.179639326997007
},
"communicator.exchange": {
"total": 79.02621381300065,
"count": 15270,
"is_parallel": true,
"self": 79.02621381300065
},
"steps_from_proto": {
"total": 15.259952966998078,
"count": 30540,
"is_parallel": true,
"self": 2.4347930950026964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.825159871995382,
"count": 122160,
"is_parallel": true,
"self": 12.825159871995382
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 152.05822555300006,
"count": 15270,
"self": 1.4833567860010817,
"children": {
"process_trajectory": {
"total": 21.506046055999043,
"count": 15270,
"self": 21.506046055999043
},
"_update_policy": {
"total": 129.06882271099994,
"count": 10,
"self": 14.724975051999323,
"children": {
"TorchPOCAOptimizer.update": {
"total": 114.34384765900062,
"count": 300,
"self": 114.34384765900062
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.499999696847226e-07,
"count": 1,
"self": 7.499999696847226e-07
},
"TrainerController._save_models": {
"total": 0.24115266600000496,
"count": 1,
"self": 0.0023205410000173288,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23883212499998763,
"count": 1,
"self": 0.23883212499998763
}
}
}
}
}
}
}