poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6376081705093384,
"min": 1.6376081705093384,
"max": 3.295656681060791,
"count": 570
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33485.8125,
"min": 21896.3046875,
"max": 106644.78125,
"count": 570
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 41.21848739495798,
"min": 37.56923076923077,
"max": 999.0,
"count": 570
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19620.0,
"min": 16264.0,
"max": 25124.0,
"count": 570
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1623.4118014485496,
"min": 1196.13662078987,
"max": 1633.231621633894,
"count": 561
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 386372.00874475477,
"min": 2393.552613964721,
"max": 422733.370125633,
"count": 561
},
"SoccerTwos.Step.mean": {
"value": 5699984.0,
"min": 9620.0,
"max": 5699984.0,
"count": 570
},
"SoccerTwos.Step.sum": {
"value": 5699984.0,
"min": 9620.0,
"max": 5699984.0,
"count": 570
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05387691780924797,
"min": -0.1729194074869156,
"max": 0.2150198072195053,
"count": 568
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.93701457977295,
"min": -38.56102752685547,
"max": 33.75811004638672,
"count": 568
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05515898019075394,
"min": -0.17391036450862885,
"max": 0.2114037424325943,
"count": 568
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.197273254394531,
"min": -38.782012939453125,
"max": 33.19038772583008,
"count": 568
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 570
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 570
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0127142850591355,
"min": -0.6329066673914592,
"max": 0.4801157932532461,
"count": 570
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.0259998440742493,
"min": -67.31239998340607,
"max": 60.50960010290146,
"count": 570
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0127142850591355,
"min": -0.6329066673914592,
"max": 0.4801157932532461,
"count": 570
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.0259998440742493,
"min": -67.31239998340607,
"max": 60.50960010290146,
"count": 570
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 570
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 570
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01641471466476408,
"min": 0.009967657426993052,
"max": 0.025575490109622477,
"count": 274
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01641471466476408,
"min": 0.009967657426993052,
"max": 0.025575490109622477,
"count": 274
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.12257067436973254,
"min": 0.0003040206989680883,
"max": 0.13078598901629448,
"count": 273
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12257067436973254,
"min": 0.0003040206989680883,
"max": 0.13078598901629448,
"count": 273
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12506366968154908,
"min": 0.000316399738464194,
"max": 0.13448004946112632,
"count": 273
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12506366968154908,
"min": 0.000316399738464194,
"max": 0.13448004946112632,
"count": 273
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 274
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 274
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 274
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 274
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 274
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 274
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1727987160",
"python_version": "3.10.15 (main, Sep 7 2024, 00:20:06) [Clang 16.0.0 (clang-1600.0.26.3)]",
"command_line_arguments": "/Users/agruzdev/.virtualenvs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=../SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1728007885"
},
"total": 20724.929886374972,
"count": 1,
"self": 0.23222574894316494,
"children": {
"run_training.setup": {
"total": 0.017382542020641267,
"count": 1,
"self": 0.017382542020641267
},
"TrainerController.start_learning": {
"total": 20724.680278084008,
"count": 1,
"self": 4.427394074620679,
"children": {
"TrainerController._reset_env": {
"total": 5.8810676268767565,
"count": 29,
"self": 5.8810676268767565
},
"TrainerController.advance": {
"total": 20714.300506591564,
"count": 392692,
"self": 3.721601637545973,
"children": {
"env_step": {
"total": 16636.45671800885,
"count": 392692,
"self": 15981.895410832716,
"children": {
"SubprocessEnvManager._take_step": {
"total": 651.8263394138776,
"count": 392692,
"self": 19.18062538804952,
"children": {
"TorchPolicy.evaluate": {
"total": 632.645714025828,
"count": 718864,
"self": 632.645714025828
}
}
},
"workers": {
"total": 2.73496776225511,
"count": 392691,
"self": 0.0,
"children": {
"worker_root": {
"total": 20714.13082990155,
"count": 392691,
"is_parallel": true,
"self": 5344.687859010883,
"children": {
"steps_from_proto": {
"total": 0.04486004274804145,
"count": 58,
"is_parallel": true,
"self": 0.004962158505804837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.039897884242236614,
"count": 232,
"is_parallel": true,
"self": 0.039897884242236614
}
}
},
"UnityEnvironment.step": {
"total": 15369.39811084792,
"count": 392691,
"is_parallel": true,
"self": 45.19922956766095,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 275.1343062124215,
"count": 392691,
"is_parallel": true,
"self": 275.1343062124215
},
"communicator.exchange": {
"total": 14484.499393173726,
"count": 392691,
"is_parallel": true,
"self": 14484.499393173726
},
"steps_from_proto": {
"total": 564.5651818941114,
"count": 785382,
"is_parallel": true,
"self": 61.92030272015836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 502.6448791739531,
"count": 3141528,
"is_parallel": true,
"self": 502.6448791739531
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4074.1221869451692,
"count": 392691,
"self": 36.02085299149621,
"children": {
"process_trajectory": {
"total": 932.9670185355935,
"count": 392691,
"self": 931.8749483296415,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0920702059520409,
"count": 11,
"self": 1.0920702059520409
}
}
},
"_update_policy": {
"total": 3105.1343154180795,
"count": 275,
"self": 360.8566713616019,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2744.2776440564776,
"count": 8253,
"self": 2744.2776440564776
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07130979094654322,
"count": 1,
"self": 1.7873942852020264e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0712919170036912,
"count": 1,
"self": 0.0712919170036912
}
}
}
}
}
}
}
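
The JSON above follows the ML-Agents run-log layout: each entry under "gauges" records the latest "value" plus the running "min", "max", and sample "count" for a training statistic, while the nested "children" entries form a hierarchical timer tree in which every node reports inclusive "total" seconds, call "count", and exclusive "self" time. A minimal sketch of how this file could be inspected, assuming standard Python and that the file is saved locally as timers.json (the path and script are illustrative, not part of the original log):

import json

# Load the run log produced by ML-Agents (local path is an assumption for this example).
with open("timers.json") as f:
    log = json.load(f)

# Summarize each gauge, e.g. self-play ELO and policy entropy over the run.
for name, gauge in log["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Walk the hierarchical timer tree to see where wall-clock time was spent.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(log["name"], log)

# Overall wall-clock duration from the recorded metadata timestamps.
meta = log["metadata"]
elapsed = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
print(f"Run duration: {elapsed / 3600:.1f} h")

For this particular log, the metadata timestamps give roughly 20,725 seconds (about 5.8 hours), which matches the root timer's "total" of 20724.93 seconds.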