poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.5261714458465576,
"min": 2.4409797191619873,
"max": 3.295668601989746,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 49957.56640625,
"min": 14459.0859375,
"max": 138509.296875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 465.6,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 10872.0,
"max": 29364.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1211.290473522263,
"min": 1195.5400029679063,
"max": 1214.0535900671598,
"count": 116
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2422.580947044526,
"min": 2392.6772923946123,
"max": 14437.628298150885,
"count": 116
},
"SoccerTwos.Step.mean": {
"value": 4999424.0,
"min": 9698.0,
"max": 4999424.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999424.0,
"min": 9698.0,
"max": 4999424.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 5.629620773106581e-06,
"min": -0.019345436245203018,
"max": 0.020470567047595978,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 5.629620864056051e-05,
"min": -0.2685878276824951,
"max": 0.2894434630870819,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.33082663625828e-06,
"min": -0.019657254219055176,
"max": 0.023300018161535263,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.33082672720775e-05,
"min": -0.2605897784233093,
"max": 0.31250762939453125,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5714285714285714,
"max": 0.3377600034077962,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 5.066400051116943,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5714285714285714,
"max": 0.3377600034077962,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 5.066400051116943,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018140532452768334,
"min": 0.011242725659394636,
"max": 0.02323831706501854,
"count": 229
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018140532452768334,
"min": 0.011242725659394636,
"max": 0.02323831706501854,
"count": 229
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 5.686411203020825e-10,
"min": 4.356862296865197e-10,
"max": 0.005476553350066146,
"count": 229
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 5.686411203020825e-10,
"min": 4.356862296865197e-10,
"max": 0.005476553350066146,
"count": 229
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 7.025304689841269e-10,
"min": 5.740743436177809e-10,
"max": 0.0058096463326364756,
"count": 229
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 7.025304689841269e-10,
"min": 5.740743436177809e-10,
"max": 0.0058096463326364756,
"count": 229
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 229
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 229
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 229
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 229
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 229
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 229
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678980597",
"python_version": "3.8.16 (default, Dec 7 2022, 00:00:00) \n[GCC 12.2.1 20221121 (Red Hat 12.2.1-4)]",
"command_line_arguments": "/home/mark/.local/bin/mlagents-learn ./config/poca/SoccerTwosHF.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678997311"
},
"total": 16714.018152524997,
"count": 1,
"self": 0.37192011299703154,
"children": {
"run_training.setup": {
"total": 0.01815122900006827,
"count": 1,
"self": 0.01815122900006827
},
"TrainerController.start_learning": {
"total": 16713.628081183,
"count": 1,
"self": 8.54478412795288,
"children": {
"TrainerController._reset_env": {
"total": 5.1029463689774275,
"count": 25,
"self": 5.1029463689774275
},
"TrainerController.advance": {
"total": 16699.76949094807,
"count": 326305,
"self": 9.731410276515817,
"children": {
"env_step": {
"total": 7957.7412364652555,
"count": 326305,
"self": 6559.395777256734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1392.3603186080982,
"count": 326305,
"self": 53.058789789301954,
"children": {
"TorchPolicy.evaluate": {
"total": 1339.3015288187962,
"count": 648258,
"self": 1339.3015288187962
}
}
},
"workers": {
"total": 5.98514060042362,
"count": 326305,
"self": 0.0,
"children": {
"worker_root": {
"total": 16697.113703385246,
"count": 326305,
"is_parallel": true,
"self": 11255.907194637937,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0037339599984989036,
"count": 2,
"is_parallel": true,
"self": 0.0014701959953526966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002263764003146207,
"count": 8,
"is_parallel": true,
"self": 0.002263764003146207
}
}
},
"UnityEnvironment.step": {
"total": 0.03327965999778826,
"count": 1,
"is_parallel": true,
"self": 0.001111563997255871,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009674949978943914,
"count": 1,
"is_parallel": true,
"self": 0.0009674949978943914
},
"communicator.exchange": {
"total": 0.02746111899978132,
"count": 1,
"is_parallel": true,
"self": 0.02746111899978132
},
"steps_from_proto": {
"total": 0.0037394820028566755,
"count": 2,
"is_parallel": true,
"self": 0.0007324269972741604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003007055005582515,
"count": 8,
"is_parallel": true,
"self": 0.003007055005582515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5441.120992092303,
"count": 326304,
"is_parallel": true,
"self": 290.55753175004065,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 228.94792979838894,
"count": 326304,
"is_parallel": true,
"self": 228.94792979838894
},
"communicator.exchange": {
"total": 4035.7824196574074,
"count": 326304,
"is_parallel": true,
"self": 4035.7824196574074
},
"steps_from_proto": {
"total": 885.8331108864659,
"count": 652608,
"is_parallel": true,
"self": 165.5936493067602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 720.2394615797057,
"count": 2610432,
"is_parallel": true,
"self": 720.2394615797057
}
}
}
}
},
"steps_from_proto": {
"total": 0.08551665500635863,
"count": 48,
"is_parallel": true,
"self": 0.017105571012507426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06841108399385121,
"count": 192,
"is_parallel": true,
"self": 0.06841108399385121
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8732.2968442063,
"count": 326305,
"self": 61.45063764400766,
"children": {
"process_trajectory": {
"total": 1102.5486385373079,
"count": 326305,
"self": 1100.141003625311,
"children": {
"RLTrainer._checkpoint": {
"total": 2.4076349119968654,
"count": 10,
"self": 2.4076349119968654
}
}
},
"_update_policy": {
"total": 7568.297568024984,
"count": 229,
"self": 935.4100611142021,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6632.887506910782,
"count": 6870,
"self": 6632.887506910782
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3269964256323874e-06,
"count": 1,
"self": 1.3269964256323874e-06
},
"TrainerController._save_models": {
"total": 0.21085841100284597,
"count": 1,
"self": 0.00202011000510538,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2088383009977406,
"count": 1,
"self": 0.2088383009977406
}
}
}
}
}
}
}
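
The file above has two parts: a "gauges" map of per-run training statistics (each with value / min / max / count) and a nested wall-clock timer tree (total seconds, call count, own time, children). Below is a minimal sketch of how it could be inspected offline; the relative path `run_logs/timers.json` is an assumption, and the script only relies on the keys visible in this dump.

```python
# Minimal sketch for inspecting this timers.json dump.
# Assumes the file sits at run_logs/timers.json (adjust the path as needed).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# "gauges" holds the training statistics recorded over the run.
elo = timers["gauges"]["SoccerTwos.Self-play.ELO.mean"]
print(f"Mean ELO: {elo['value']:.1f} (min {elo['min']:.1f}, max {elo['max']:.1f})")

# The rest of the file is a nested profile: each node reports its total
# seconds, call count, own time, and child timers.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node['total']:.1f}s over {node['count']} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```

For this run, the tree shows roughly 16714 s total, split mainly between `env_step` (Unity simulation and observation processing) and `trainer_advance` (trajectory processing and the POCA policy updates).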