poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8615069389343262,
"min": 1.8247570991516113,
"max": 3.2957539558410645,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37170.5703125,
"min": 19369.75390625,
"max": 162207.125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 82.3,
"min": 65.30666666666667,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19752.0,
"min": 12836.0,
"max": 28176.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1649.070606216243,
"min": 1181.4399466356049,
"max": 1685.667751132636,
"count": 807
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 197888.47274594917,
"min": 2362.8798932712098,
"max": 244632.94311884124,
"count": 807
},
"SoccerTwos.Step.mean": {
"value": 9999975.0,
"min": 9106.0,
"max": 9999975.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999975.0,
"min": 9106.0,
"max": 9999975.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05094987526535988,
"min": -0.1148688867688179,
"max": 0.10411648452281952,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.113985061645508,
"min": -11.486888885498047,
"max": 13.132293701171875,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.049873556941747665,
"min": -0.11518264561891556,
"max": 0.10429093986749649,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.984827041625977,
"min": -11.518264770507812,
"max": 12.444042205810547,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.07154333392779032,
"min": -0.6980380955196562,
"max": 0.44224138044077776,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.585200071334839,
"min": -48.29700011014938,
"max": 47.355999767780304,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.07154333392779032,
"min": -0.6980380955196562,
"max": 0.44224138044077776,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.585200071334839,
"min": -48.29700011014938,
"max": 47.355999767780304,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016031810943968595,
"min": 0.010918373941482666,
"max": 0.02573070893364881,
"count": 474
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016031810943968595,
"min": 0.010918373941482666,
"max": 0.02573070893364881,
"count": 474
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06607509180903434,
"min": 2.9846235764334024e-08,
"max": 0.08878441825509072,
"count": 474
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06607509180903434,
"min": 2.9846235764334024e-08,
"max": 0.08878441825509072,
"count": 474
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06707788606484731,
"min": 2.4387970990839372e-08,
"max": 0.09111692036191622,
"count": 474
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06707788606484731,
"min": 2.4387970990839372e-08,
"max": 0.09111692036191622,
"count": 474
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 474
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 474
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 474
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 474
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 474
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 474
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693023828",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/rohan/.local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1693039569"
},
"total": 15740.644438432999,
"count": 1,
"self": 0.1680551569970703,
"children": {
"run_training.setup": {
"total": 0.009554177000154596,
"count": 1,
"self": 0.009554177000154596
},
"TrainerController.start_learning": {
"total": 15740.466829099001,
"count": 1,
"self": 7.999030643601145,
"children": {
"TrainerController._reset_env": {
"total": 2.085067589002847,
"count": 50,
"self": 2.085067589002847
},
"TrainerController.advance": {
"total": 15730.310500759395,
"count": 660249,
"self": 7.84789733963953,
"children": {
"env_step": {
"total": 5567.681112849221,
"count": 660249,
"self": 4623.979751718152,
"children": {
"SubprocessEnvManager._take_step": {
"total": 938.6726461632952,
"count": 660249,
"self": 49.7130526787937,
"children": {
"TorchPolicy.evaluate": {
"total": 888.9595934845015,
"count": 1273102,
"self": 888.9595934845015
}
}
},
"workers": {
"total": 5.028714967772885,
"count": 660249,
"self": 0.0,
"children": {
"worker_root": {
"total": 15730.032973845238,
"count": 660249,
"is_parallel": true,
"self": 12026.926939272706,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011228769999434007,
"count": 2,
"is_parallel": true,
"self": 0.0002601669993964606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008627100005469401,
"count": 8,
"is_parallel": true,
"self": 0.0008627100005469401
}
}
},
"UnityEnvironment.step": {
"total": 0.015508402999785176,
"count": 1,
"is_parallel": true,
"self": 0.0003324809999867284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023526699987996835,
"count": 1,
"is_parallel": true,
"self": 0.00023526699987996835
},
"communicator.exchange": {
"total": 0.013968159999876661,
"count": 1,
"is_parallel": true,
"self": 0.013968159999876661
},
"steps_from_proto": {
"total": 0.0009724950000418175,
"count": 2,
"is_parallel": true,
"self": 0.00019491699958962272,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007775780004521948,
"count": 8,
"is_parallel": true,
"self": 0.0007775780004521948
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3703.056245512519,
"count": 660248,
"is_parallel": true,
"self": 217.78096214883726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 129.2553541499865,
"count": 660248,
"is_parallel": true,
"self": 129.2553541499865
},
"communicator.exchange": {
"total": 2739.2448621887784,
"count": 660248,
"is_parallel": true,
"self": 2739.2448621887784
},
"steps_from_proto": {
"total": 616.775067024917,
"count": 1320496,
"is_parallel": true,
"self": 113.38610202667496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 503.38896499824205,
"count": 5281984,
"is_parallel": true,
"self": 503.38896499824205
}
}
}
}
},
"steps_from_proto": {
"total": 0.049789060013154085,
"count": 98,
"is_parallel": true,
"self": 0.009487315041042166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04030174497211192,
"count": 392,
"is_parallel": true,
"self": 0.04030174497211192
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10154.781490570535,
"count": 660249,
"self": 71.18242645585633,
"children": {
"process_trajectory": {
"total": 879.5376177606822,
"count": 660249,
"self": 877.777553246684,
"children": {
"RLTrainer._checkpoint": {
"total": 1.760064513998259,
"count": 20,
"self": 1.760064513998259
}
}
},
"_update_policy": {
"total": 9204.061446353997,
"count": 474,
"self": 760.3055499569891,
"children": {
"TorchPOCAOptimizer.update": {
"total": 8443.755896397008,
"count": 14223,
"self": 8443.755896397008
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.520014383364469e-07,
"count": 1,
"self": 5.520014383364469e-07
},
"TrainerController._save_models": {
"total": 0.07222955500037642,
"count": 1,
"self": 0.0008596629995736293,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07136989200080279,
"count": 1,
"self": 0.07136989200080279
}
}
}
}
}
}
}