{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.694275140762329,
"min": 1.6514707803726196,
"max": 3.2957615852355957,
"count": 612
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35024.0546875,
"min": 22101.92578125,
"max": 125759.7265625,
"count": 612
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.51,
"min": 43.666666666666664,
"max": 999.0,
"count": 612
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20204.0,
"min": 16236.0,
"max": 24964.0,
"count": 612
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1680.6541918624032,
"min": 1196.8286436159,
"max": 1703.176681790927,
"count": 592
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 336130.83837248065,
"min": 2400.670570872331,
"max": 361419.61620117305,
"count": 592
},
"SoccerTwos.Step.mean": {
"value": 6119994.0,
"min": 9498.0,
"max": 6119994.0,
"count": 612
},
"SoccerTwos.Step.sum": {
"value": 6119994.0,
"min": 9498.0,
"max": 6119994.0,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.020087381824851036,
"min": -0.0951186865568161,
"max": 0.18743562698364258,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.9973888397216797,
"min": -18.611299514770508,
"max": 32.988670349121094,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.022601088508963585,
"min": -0.0991855189204216,
"max": 0.18446926772594452,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.497616767883301,
"min": -19.44036102294922,
"max": 32.466590881347656,
"count": 612
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 612
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.19691055324209394,
"min": -0.5822285711765289,
"max": 0.4100340950218114,
"count": 612
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -39.1852000951767,
"min": -67.867999792099,
"max": 72.1660007238388,
"count": 612
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.19691055324209394,
"min": -0.5822285711765289,
"max": 0.4100340950218114,
"count": 612
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -39.1852000951767,
"min": -67.867999792099,
"max": 72.1660007238388,
"count": 612
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 612
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 612
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018075651960680263,
"min": 0.009404861269285903,
"max": 0.024723038620625932,
"count": 293
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018075651960680263,
"min": 0.009404861269285903,
"max": 0.024723038620625932,
"count": 293
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11116881494720778,
"min": 3.985007430552893e-05,
"max": 0.12105266253153484,
"count": 293
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11116881494720778,
"min": 3.985007430552893e-05,
"max": 0.12105266253153484,
"count": 293
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11312747399012248,
"min": 3.145316174292627e-05,
"max": 0.1233666772643725,
"count": 293
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11312747399012248,
"min": 3.145316174292627e-05,
"max": 0.1233666772643725,
"count": 293
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 293
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 293
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 293
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 293
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 293
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 293
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678116444",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\house\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./ml-agents/mlagents/training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678137475"
},
"total": 21030.2778889,
"count": 1,
"self": 0.2408370000011928,
"children": {
"run_training.setup": {
"total": 0.1250806,
"count": 1,
"self": 0.1250806
},
"TrainerController.start_learning": {
"total": 21029.9119713,
"count": 1,
"self": 10.040809900354361,
"children": {
"TrainerController._reset_env": {
"total": 7.817688400001239,
"count": 31,
"self": 7.817688400001239
},
"TrainerController.advance": {
"total": 21011.903763499642,
"count": 415049,
"self": 10.23915749981461,
"children": {
"env_step": {
"total": 7283.2836391005085,
"count": 415049,
"self": 5581.419482802006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1695.5341048986973,
"count": 415049,
"self": 60.16887019903493,
"children": {
"TorchPolicy.evaluate": {
"total": 1635.3652346996623,
"count": 775931,
"self": 1635.3652346996623
}
}
},
"workers": {
"total": 6.330051399805274,
"count": 415048,
"self": 0.0,
"children": {
"worker_root": {
"total": 21008.700451899396,
"count": 415048,
"is_parallel": true,
"self": 16559.61056669947,
"children": {
"steps_from_proto": {
"total": 0.05942590000402159,
"count": 62,
"is_parallel": true,
"self": 0.012826999990548593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.046598900013473,
"count": 248,
"is_parallel": true,
"self": 0.046598900013473
}
}
},
"UnityEnvironment.step": {
"total": 4449.030459299924,
"count": 415048,
"is_parallel": true,
"self": 225.32489310093115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 179.56339269904132,
"count": 415048,
"is_parallel": true,
"self": 179.56339269904132
},
"communicator.exchange": {
"total": 3327.15446159991,
"count": 415048,
"is_parallel": true,
"self": 3327.15446159991
},
"steps_from_proto": {
"total": 716.9877119000416,
"count": 830096,
"is_parallel": true,
"self": 154.99606210134925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 561.9916497986924,
"count": 3320384,
"is_parallel": true,
"self": 561.9916497986924
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13718.38096689932,
"count": 415048,
"self": 70.19204039957367,
"children": {
"process_trajectory": {
"total": 1892.5478608997626,
"count": 415048,
"self": 1890.7863625997632,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7614982999994027,
"count": 12,
"self": 1.7614982999994027
}
}
},
"_update_policy": {
"total": 11755.641065599984,
"count": 293,
"self": 1069.7929925998633,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10685.848073000121,
"count": 8790,
"self": 10685.848073000121
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.1000014385208488e-06,
"count": 1,
"self": 2.1000014385208488e-06
},
"TrainerController._save_models": {
"total": 0.14970739999989746,
"count": 1,
"self": 0.0066014000003633555,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1431059999995341,
"count": 1,
"self": 0.1431059999995341
}
}
}
}
}
}
}