poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.087160110473633,
"min": 3.087160110473633,
"max": 3.2896835803985596,
"count": 10
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 308716.0,
"min": 308716.0,
"max": 332652.8125,
"count": 10
},
"SoccerTwos.Step.mean": {
"value": 499972.0,
"min": 49982.0,
"max": 499972.0,
"count": 10
},
"SoccerTwos.Step.sum": {
"value": 499972.0,
"min": 49982.0,
"max": 499972.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0023185666650533676,
"min": 0.0023185666650533676,
"max": 0.08745706081390381,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.875720500946045,
"min": 1.875720500946045,
"max": 70.84021759033203,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.002253257669508457,
"min": 0.002253257669508457,
"max": 0.08721368759870529,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.822885513305664,
"min": 1.822885513305664,
"max": 70.6430892944336,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 719.0,
"min": 712.3793103448276,
"max": 966.24,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 94908.0,
"min": 82636.0,
"max": 105388.0,
"count": 10
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1196.4524119957505,
"min": 1196.4524119957505,
"max": 1199.3971709642813,
"count": 10
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 40679.382007855515,
"min": 4788.86364387737,
"max": 40708.74182975941,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11034545483011188,
"min": -0.18967741920102027,
"max": 0.07278571331075259,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.282800018787384,
"min": -11.759999990463257,
"max": 4.075999945402145,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11034545483011188,
"min": -0.18967741920102027,
"max": 0.07278571331075259,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.282800018787384,
"min": -11.759999990463257,
"max": 4.075999945402145,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01744806864074538,
"min": 0.015331069930107332,
"max": 0.018147315255191643,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.05234420592223614,
"min": 0.030662139860214665,
"max": 0.053367810678219024,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0025519731776512344,
"min": 0.0005939859336649533,
"max": 0.002631639942410402,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.007655919532953703,
"min": 0.0011879718673299066,
"max": 0.007655919532953703,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0025551888055512484,
"min": 0.000599109773111195,
"max": 0.002695514218066819,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.007665566416653746,
"min": 0.00119821954622239,
"max": 0.007665566416653746,
"count": 10
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 1.602649465786666e-05,
"min": 1.602649465786666e-05,
"max": 0.0002814474061842,
"count": 10
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 4.8079483973599984e-05,
"min": 4.8079483973599984e-05,
"max": 0.0006775944741352,
"count": 10
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10534213333333335,
"min": 0.10534213333333335,
"max": 0.1938158,
"count": 10
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.31602640000000004,
"min": 0.23119040000000002,
"max": 0.5258648,
"count": 10
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0002765724533333333,
"min": 0.0002765724533333333,
"max": 0.00469140842,
"count": 10
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.00082971736,
"min": 0.00082971736,
"max": 0.01130065352,
"count": 10
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1725555607",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/demianpablo/Anaconda/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1725559258"
},
"total": 3650.9522501010006,
"count": 1,
"self": 0.5221333670006061,
"children": {
"run_training.setup": {
"total": 0.33066640099968936,
"count": 1,
"self": 0.33066640099968936
},
"TrainerController.start_learning": {
"total": 3650.0994503330003,
"count": 1,
"self": 0.6862437807676542,
"children": {
"TrainerController._reset_env": {
"total": 7.137531752999166,
"count": 5,
"self": 7.137531752999166
},
"TrainerController.advance": {
"total": 3642.048352105232,
"count": 31618,
"self": 0.7200315151103496,
"children": {
"env_step": {
"total": 2763.261466298126,
"count": 31618,
"self": 2668.9445393230408,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.81658513307684,
"count": 31618,
"self": 4.104795521957385,
"children": {
"TorchPolicy.evaluate": {
"total": 89.71178961111946,
"count": 62768,
"self": 89.71178961111946
}
}
},
"workers": {
"total": 0.5003418420083108,
"count": 31618,
"self": 0.0,
"children": {
"worker_root": {
"total": 3642.9348597131,
"count": 31618,
"is_parallel": true,
"self": 1054.4960506109346,
"children": {
"steps_from_proto": {
"total": 0.013129732999004773,
"count": 10,
"is_parallel": true,
"self": 0.0018686650037125219,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.011261067995292251,
"count": 40,
"is_parallel": true,
"self": 0.011261067995292251
}
}
},
"UnityEnvironment.step": {
"total": 2588.4256793691666,
"count": 31618,
"is_parallel": true,
"self": 7.646084514245558,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 48.168013340010475,
"count": 31618,
"is_parallel": true,
"self": 48.168013340010475
},
"communicator.exchange": {
"total": 2442.7877109818364,
"count": 31618,
"is_parallel": true,
"self": 2442.7877109818364
},
"steps_from_proto": {
"total": 89.82387053307411,
"count": 63236,
"is_parallel": true,
"self": 12.072774706288328,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.75109582678579,
"count": 252944,
"is_parallel": true,
"self": 77.75109582678579
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 878.0668542919957,
"count": 31618,
"self": 5.703881939813982,
"children": {
"process_trajectory": {
"total": 102.4580839291848,
"count": 31618,
"self": 102.19574840618588,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26233552299891016,
"count": 1,
"self": 0.26233552299891016
}
}
},
"_update_policy": {
"total": 769.9048884229969,
"count": 24,
"self": 83.24982431101671,
"children": {
"TorchPOCAOptimizer.update": {
"total": 686.6550641119802,
"count": 960,
"self": 686.6550641119802
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.770013326080516e-07,
"count": 1,
"self": 7.770013326080516e-07
},
"TrainerController._save_models": {
"total": 0.22732191700015392,
"count": 1,
"self": 0.0011384949993953342,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22618342200075858,
"count": 1,
"self": 0.22618342200075858
}
}
}
}
}
}
}
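
A minimal sketch of inspecting this file with the Python standard library. The top-level keys ("gauges", "metadata", "total", "children") are taken from the JSON above; the local path and the printed columns are illustrative assumptions, not part of the training run itself.

    import json

    # Hypothetical local path; adjust to wherever this file was downloaded.
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    # Each gauge records the latest value plus min/max over `count` summary windows.
    for name, gauge in timers["gauges"].items():
        print(f"{name}: value={gauge['value']:.4f} "
              f"min={gauge['min']:.4f} max={gauge['max']:.4f} "
              f"count={gauge['count']}")

    # The timer tree ("total"/"self"/"children") nests wall-clock seconds per phase.
    print(f"total run time: {timers['total']:.1f} s")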