poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6904041767120361,
"min": 1.6904041767120361,
"max": 3.295828342437744,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31373.90234375,
"min": 30855.255859375,
"max": 107373.4921875,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.223684210526315,
"min": 41.042735042735046,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18916.0,
"min": 14456.0,
"max": 25876.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1505.5654356524688,
"min": 1190.9937210547932,
"max": 1528.816168391446,
"count": 998
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 228845.94621917524,
"min": 2384.1991119945988,
"max": 341659.6208856196,
"count": 998
},
"SoccerTwos.Step.mean": {
"value": 9999960.0,
"min": 9684.0,
"max": 9999960.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999960.0,
"min": 9684.0,
"max": 9999960.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.021533388644456863,
"min": -0.14438019692897797,
"max": 0.18974851071834564,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.2946083545684814,
"min": -20.496976852416992,
"max": 23.52881622314453,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.020931841805577278,
"min": -0.138943612575531,
"max": 0.18486110866069794,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.2025718688964844,
"min": -20.91425323486328,
"max": 22.92277717590332,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.009445750635433821,
"min": -0.625,
"max": 0.4306620667720663,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.4451998472213745,
"min": -69.94459998607635,
"max": 42.50040006637573,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.009445750635433821,
"min": -0.625,
"max": 0.4306620667720663,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.4451998472213745,
"min": -69.94459998607635,
"max": 42.50040006637573,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013262719112390187,
"min": 0.007267597714962904,
"max": 0.017278411099687217,
"count": 243
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013262719112390187,
"min": 0.007267597714962904,
"max": 0.017278411099687217,
"count": 243
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08443939536809922,
"min": 0.0016251095352345147,
"max": 0.11132827904075385,
"count": 243
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08443939536809922,
"min": 0.0016251095352345147,
"max": 0.11132827904075385,
"count": 243
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08609315231442452,
"min": 0.001621522156347055,
"max": 0.11312161087989807,
"count": 243
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08609315231442452,
"min": 0.001621522156347055,
"max": 0.11312161087989807,
"count": 243
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 243
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 243
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 243
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 243
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 243
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 243
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681400793",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/anaconda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --force --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681418891"
},
"total": 18097.326334434,
"count": 1,
"self": 1.4654128270012734,
"children": {
"run_training.setup": {
"total": 0.06966816799968001,
"count": 1,
"self": 0.06966816799968001
},
"TrainerController.start_learning": {
"total": 18095.791253439,
"count": 1,
"self": 14.124513753697101,
"children": {
"TrainerController._reset_env": {
"total": 13.560869624994666,
"count": 100,
"self": 13.560869624994666
},
"TrainerController.advance": {
"total": 18066.703308018306,
"count": 690896,
"self": 14.496848152270104,
"children": {
"env_step": {
"total": 12382.015595357303,
"count": 690896,
"self": 8790.504927490492,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3583.6868403290887,
"count": 690896,
"self": 89.14229998571545,
"children": {
"TorchPolicy.evaluate": {
"total": 3494.5445403433732,
"count": 1266756,
"self": 3494.5445403433732
}
}
},
"workers": {
"total": 7.823827537722082,
"count": 690896,
"self": 0.0,
"children": {
"worker_root": {
"total": 18068.245141560226,
"count": 690896,
"is_parallel": true,
"self": 10914.655148137948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003106338000179676,
"count": 2,
"is_parallel": true,
"self": 0.0008824550004646881,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002223882999714988,
"count": 8,
"is_parallel": true,
"self": 0.002223882999714988
}
}
},
"UnityEnvironment.step": {
"total": 0.025750114999937068,
"count": 1,
"is_parallel": true,
"self": 0.0006053669999346312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005618700001832622,
"count": 1,
"is_parallel": true,
"self": 0.0005618700001832622
},
"communicator.exchange": {
"total": 0.022749475999717106,
"count": 1,
"is_parallel": true,
"self": 0.022749475999717106
},
"steps_from_proto": {
"total": 0.001833402000102069,
"count": 2,
"is_parallel": true,
"self": 0.0004115790002288122,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014218229998732568,
"count": 8,
"is_parallel": true,
"self": 0.0014218229998732568
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 7153.403618047258,
"count": 690895,
"is_parallel": true,
"self": 391.8650398342852,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 271.32354886834355,
"count": 690895,
"is_parallel": true,
"self": 271.32354886834355
},
"communicator.exchange": {
"total": 5322.061446691012,
"count": 690895,
"is_parallel": true,
"self": 5322.061446691012
},
"steps_from_proto": {
"total": 1168.153582653616,
"count": 1381790,
"is_parallel": true,
"self": 249.47668309119263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 918.6768995624234,
"count": 5527160,
"is_parallel": true,
"self": 918.6768995624234
}
}
}
}
},
"steps_from_proto": {
"total": 0.18637537501990664,
"count": 198,
"is_parallel": true,
"self": 0.03953962803598188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.14683574698392476,
"count": 792,
"is_parallel": true,
"self": 0.14683574698392476
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5670.190864508732,
"count": 690896,
"self": 105.78475479125427,
"children": {
"process_trajectory": {
"total": 2497.7491164954786,
"count": 690896,
"self": 2469.2401843234825,
"children": {
"RLTrainer._checkpoint": {
"total": 28.508932171996094,
"count": 20,
"self": 28.508932171996094
}
}
},
"_update_policy": {
"total": 3066.6569932219995,
"count": 243,
"self": 2197.973723531895,
"children": {
"TorchPOCAOptimizer.update": {
"total": 868.6832696901047,
"count": 9720,
"self": 868.6832696901047
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 1.4025609420023102,
"count": 1,
"self": 0.1260476670031494,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2765132749991608,
"count": 1,
"self": 1.2765132749991608
}
}
}
}
}
}
}
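
For anyone inspecting this log programmatically, below is a minimal sketch (not part of the original run output) that loads a locally saved copy of the file and prints the gauge summaries. The local filename "timers.json" is an assumption; the field names (gauges, value, min, max, count, metadata) are taken from the JSON above.

import json

# Load a local copy of the log; the path "timers.json" is assumed.
with open("timers.json") as f:
    timers = json.load(f)

# "gauges" maps each metric name to a value/min/max/count summary.
for name, stats in timers["gauges"].items():
    print(f"{name}: value={stats['value']:.4g} "
          f"min={stats['min']:.4g} max={stats['max']:.4g} "
          f"count={stats['count']}")

# "metadata" records the run configuration used by mlagents-learn.
print(timers["metadata"]["mlagents_version"])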