poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1853554248809814,
"min": 3.165717601776123,
"max": 3.256704092025757,
"count": 90
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 59120.1953125,
"min": 22107.51953125,
"max": 104214.53125,
"count": 90
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 836.5,
"min": 426.27272727272725,
"max": 999.0,
"count": 90
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20076.0,
"min": 13232.0,
"max": 26708.0,
"count": 90
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1183.3640282982788,
"min": 1181.9704321645243,
"max": 1200.795990516056,
"count": 75
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2366.7280565965575,
"min": 2363.9408643290485,
"max": 11929.302938331019,
"count": 75
},
"SoccerTwos.Step.mean": {
"value": 999948.0,
"min": 109698.0,
"max": 999948.0,
"count": 90
},
"SoccerTwos.Step.sum": {
"value": 999948.0,
"min": 109698.0,
"max": 999948.0,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.007164405658841133,
"min": -0.014982464723289013,
"max": 0.029016362503170967,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0859728679060936,
"min": -0.15068447589874268,
"max": 0.42887791991233826,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007166388910263777,
"min": -0.01595994271337986,
"max": 0.029594743624329567,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.08599666506052017,
"min": -0.19151932001113892,
"max": 0.4353296160697937,
"count": 90
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 90
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.16666666666666666,
"min": -0.5833333333333334,
"max": 0.32618666092554727,
"count": 90
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -7.710799992084503,
"max": 4.892799913883209,
"count": 90
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.16666666666666666,
"min": -0.5833333333333334,
"max": 0.32618666092554727,
"count": 90
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -7.710799992084503,
"max": 4.892799913883209,
"count": 90
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 90
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 90
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017051780303396904,
"min": 0.010264106953400187,
"max": 0.021436606487259268,
"count": 41
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017051780303396904,
"min": 0.010264106953400187,
"max": 0.021436606487259268,
"count": 41
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00041840440680971367,
"min": 7.760814499609599e-06,
"max": 0.004851551392736534,
"count": 41
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.00041840440680971367,
"min": 7.760814499609599e-06,
"max": 0.004851551392736534,
"count": 41
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.000418744977893463,
"min": 8.187464648775252e-06,
"max": 0.005020396752903859,
"count": 41
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.000418744977893463,
"min": 8.187464648775252e-06,
"max": 0.005020396752903859,
"count": 41
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 41
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 41
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 41
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 41
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 41
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 41
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689262993",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689265045"
},
"total": 2051.8194522780004,
"count": 1,
"self": 0.38692356800038397,
"children": {
"run_training.setup": {
"total": 0.04912061800041556,
"count": 1,
"self": 0.04912061800041556
},
"TrainerController.start_learning": {
"total": 2051.3834080919996,
"count": 1,
"self": 1.4297072327517526,
"children": {
"TrainerController._reset_env": {
"total": 5.8505743470004745,
"count": 5,
"self": 5.8505743470004745
},
"TrainerController.advance": {
"total": 2043.8246442432464,
"count": 58741,
"self": 1.5466441024855158,
"children": {
"env_step": {
"total": 1650.7772208318975,
"count": 58741,
"self": 1301.426601842818,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.49107166401063,
"count": 58741,
"self": 10.864575685266573,
"children": {
"TorchPolicy.evaluate": {
"total": 337.62649597874406,
"count": 116608,
"self": 337.62649597874406
}
}
},
"workers": {
"total": 0.8595473250688883,
"count": 58741,
"self": 0.0,
"children": {
"worker_root": {
"total": 2047.0026537257418,
"count": 58741,
"is_parallel": true,
"self": 944.84341135869,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00418624399935652,
"count": 2,
"is_parallel": true,
"self": 0.0009510939989922917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032351500003642286,
"count": 8,
"is_parallel": true,
"self": 0.0032351500003642286
}
}
},
"UnityEnvironment.step": {
"total": 0.08429741999952967,
"count": 1,
"is_parallel": true,
"self": 0.00380554699950153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009018929995363578,
"count": 1,
"is_parallel": true,
"self": 0.0009018929995363578
},
"communicator.exchange": {
"total": 0.07381326599988824,
"count": 1,
"is_parallel": true,
"self": 0.07381326599988824
},
"steps_from_proto": {
"total": 0.005776714000603533,
"count": 2,
"is_parallel": true,
"self": 0.0015629140034434386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004213799997160095,
"count": 8,
"is_parallel": true,
"self": 0.004213799997160095
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1102.1488647630513,
"count": 58740,
"is_parallel": true,
"self": 64.87031498196848,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 42.68622125575712,
"count": 58740,
"is_parallel": true,
"self": 42.68622125575712
},
"communicator.exchange": {
"total": 776.845947855023,
"count": 58740,
"is_parallel": true,
"self": 776.845947855023
},
"steps_from_proto": {
"total": 217.74638067030264,
"count": 117480,
"is_parallel": true,
"self": 35.14515632017719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 182.60122435012545,
"count": 469920,
"is_parallel": true,
"self": 182.60122435012545
}
}
}
}
},
"steps_from_proto": {
"total": 0.010377604000495921,
"count": 8,
"is_parallel": true,
"self": 0.001878691000456456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008498913000039465,
"count": 32,
"is_parallel": true,
"self": 0.008498913000039465
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 391.50077930886346,
"count": 58741,
"self": 13.187245363963484,
"children": {
"process_trajectory": {
"total": 99.65893102790506,
"count": 58741,
"self": 98.97506984290521,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6838611849998415,
"count": 2,
"self": 0.6838611849998415
}
}
},
"_update_policy": {
"total": 278.6546029169949,
"count": 41,
"self": 176.7638483069668,
"children": {
"TorchPOCAOptimizer.update": {
"total": 101.89075461002813,
"count": 1230,
"self": 101.89075461002813
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.930008673109114e-07,
"count": 1,
"self": 9.930008673109114e-07
},
"TrainerController._save_models": {
"total": 0.2784812760000932,
"count": 1,
"self": 0.002167464999729418,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2763138110003638,
"count": 1,
"self": 0.2763138110003638
}
}
}
}
}
}
}
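
For reference, below is a minimal sketch of how a file like this can be loaded and summarized with Python's standard json module. The field names used here (gauges, children, total, count, and the SoccerTwos.* gauge keys) are taken from the data above; the file path "timers.json" and the helper name walk_timers are assumptions for illustration, not part of the ML-Agents tooling.

import json

# Load the run log written by mlagents-learn (path is an assumption).
with open("timers.json") as f:
    data = json.load(f)

# Print a few headline gauges recorded in the "gauges" section above.
for key in ("SoccerTwos.Self-play.ELO.mean",
            "SoccerTwos.Step.mean",
            "SoccerTwos.Losses.PolicyLoss.mean"):
    g = data["gauges"][key]
    print(f"{key}: value={g['value']:.3f} "
          f"min={g['min']:.3f} max={g['max']:.3f} count={g['count']}")

# Recursively walk the nested timer tree ("total"/"count"/"children")
# and report where wall-clock time was spent during the run.
def walk_timers(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child_name, child, depth + 1)

walk_timers("root", data)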