{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9046963453292847,
"min": 1.9046963453292847,
"max": 3.2957520484924316,
"count": 538
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35838.765625,
"min": 22702.98828125,
"max": 124771.578125,
"count": 538
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 73.53731343283582,
"min": 42.0,
"max": 999.0,
"count": 538
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 11088.0,
"max": 29448.0,
"count": 538
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1524.2106331334362,
"min": 1191.6479442202194,
"max": 1541.8015807507788,
"count": 477
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 204244.22483988045,
"min": 2383.295888440439,
"max": 357697.9667341807,
"count": 477
},
"SoccerTwos.Step.mean": {
"value": 5379912.0,
"min": 9740.0,
"max": 5379912.0,
"count": 538
},
"SoccerTwos.Step.sum": {
"value": 5379912.0,
"min": 9740.0,
"max": 5379912.0,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.017882201820611954,
"min": -0.10268796980381012,
"max": 0.1937672346830368,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.414097309112549,
"min": -16.43007469177246,
"max": 30.227689743041992,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.016559652984142303,
"min": -0.11016736179590225,
"max": 0.18774959444999695,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.23555326461792,
"min": -17.62677764892578,
"max": 29.288936614990234,
"count": 538
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 538
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1477170365828055,
"min": -0.6657280015945435,
"max": 0.4405047601177579,
"count": 538
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -19.94179993867874,
"min": -46.371400237083435,
"max": 47.535199880599976,
"count": 538
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1477170365828055,
"min": -0.6657280015945435,
"max": 0.4405047601177579,
"count": 538
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -19.94179993867874,
"min": -46.371400237083435,
"max": 47.535199880599976,
"count": 538
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 538
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 538
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017182656736986247,
"min": 0.010294425412818478,
"max": 0.02685851512942463,
"count": 256
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017182656736986247,
"min": 0.010294425412818478,
"max": 0.02685851512942463,
"count": 256
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10605661422014237,
"min": 2.5658693327083406e-07,
"max": 0.1136317012210687,
"count": 256
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10605661422014237,
"min": 2.5658693327083406e-07,
"max": 0.1136317012210687,
"count": 256
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10816983431577683,
"min": 2.541332889904879e-07,
"max": 0.1156247744957606,
"count": 256
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10816983431577683,
"min": 2.541332889904879e-07,
"max": 0.1156247744957606,
"count": 256
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 256
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 256
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 256
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 256
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 256
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 256
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691916859",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\G:\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1691932228"
},
"total": 15368.918933300001,
"count": 1,
"self": 4.93214799999987,
"children": {
"run_training.setup": {
"total": 0.09903299999999993,
"count": 1,
"self": 0.09903299999999993
},
"TrainerController.start_learning": {
"total": 15363.887752300001,
"count": 1,
"self": 7.99969550051901,
"children": {
"TrainerController._reset_env": {
"total": 4.435616499999727,
"count": 27,
"self": 4.435616499999727
},
"TrainerController.advance": {
"total": 15351.320944599482,
"count": 363702,
"self": 8.844116099715393,
"children": {
"env_step": {
"total": 5833.154008899425,
"count": 363702,
"self": 4507.485278099248,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1320.4052337004516,
"count": 363702,
"self": 51.697666200741196,
"children": {
"TorchPolicy.evaluate": {
"total": 1268.7075674997104,
"count": 682466,
"self": 1268.7075674997104
}
}
},
"workers": {
"total": 5.263497099725668,
"count": 363701,
"self": 0.0,
"children": {
"worker_root": {
"total": 15348.313708099879,
"count": 363701,
"is_parallel": true,
"self": 11778.164801800305,
"children": {
"steps_from_proto": {
"total": 0.04411069999191408,
"count": 54,
"is_parallel": true,
"self": 0.009417199994333103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03469349999758098,
"count": 216,
"is_parallel": true,
"self": 0.03469349999758098
}
}
},
"UnityEnvironment.step": {
"total": 3570.104795599581,
"count": 363701,
"is_parallel": true,
"self": 180.70742649916792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 138.96724710021127,
"count": 363701,
"is_parallel": true,
"self": 138.96724710021127
},
"communicator.exchange": {
"total": 2681.818641899953,
"count": 363701,
"is_parallel": true,
"self": 2681.818641899953
},
"steps_from_proto": {
"total": 568.6114801002484,
"count": 727402,
"is_parallel": true,
"self": 124.4156686998341,
"children": {
"_process_rank_one_or_two_observation": {
"total": 444.1958114004143,
"count": 2909608,
"is_parallel": true,
"self": 444.1958114004143
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9509.322819600342,
"count": 363701,
"self": 59.654099500648954,
"children": {
"process_trajectory": {
"total": 1319.1030308996862,
"count": 363701,
"self": 1317.5299255996854,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5731053000008615,
"count": 10,
"self": 1.5731053000008615
}
}
},
"_update_policy": {
"total": 8130.565689200006,
"count": 257,
"self": 868.8208004000171,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7261.744888799989,
"count": 7719,
"self": 7261.744888799989
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 0.13149460000022373,
"count": 1,
"self": 0.009397899999385118,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12209670000083861,
"count": 1,
"self": 0.12209670000083861
}
}
}
}
}
}
}