{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.156020402908325,
"min": 3.155545949935913,
"max": 3.295720338821411,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 60797.578125,
"min": 30602.51171875,
"max": 108363.3125,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 510.55555555555554,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 17288.0,
"max": 24052.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1192.1660900320253,
"min": 1191.522966599616,
"max": 1201.5373852769912,
"count": 32
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4768.664360128101,
"min": 2383.045933199232,
"max": 9612.29908221593,
"count": 32
},
"SoccerTwos.Step.mean": {
"value": 499446.0,
"min": 9042.0,
"max": 499446.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499446.0,
"min": 9042.0,
"max": 499446.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.006087624467909336,
"min": -0.011245258152484894,
"max": 0.07628443837165833,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.06087624654173851,
"min": -0.13048399984836578,
"max": 1.1442666053771973,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006160677410662174,
"min": -0.012630773708224297,
"max": 0.07636138796806335,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.06160677224397659,
"min": -0.12630774080753326,
"max": 1.1453850269317627,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.46153846153846156,
"max": 0.15643333767851195,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 1.8772000521421432,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.46153846153846156,
"max": 0.15643333767851195,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 1.8772000521421432,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01612367565006328,
"min": 0.012100193410879001,
"max": 0.01969011021234716,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01612367565006328,
"min": 0.012100193410879001,
"max": 0.01969011021234716,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 4.946423329480846e-06,
"min": 4.946423329480846e-06,
"max": 0.005631159874610603,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 4.946423329480846e-06,
"min": 4.946423329480846e-06,
"max": 0.005631159874610603,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 5.5892053903032014e-06,
"min": 5.5892053903032014e-06,
"max": 0.0053228480430940785,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 5.5892053903032014e-06,
"min": 5.5892053903032014e-06,
"max": 0.0053228480430940785,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680178071",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\fabbr\\anaconda3\\envs\\RL\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.28.0",
"mlagents_envs_version": "0.28.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1680179347"
},
"total": 1276.2601662,
"count": 1,
"self": 0.21683649999999943,
"children": {
"run_training.setup": {
"total": 0.09062459999999994,
"count": 1,
"self": 0.09062459999999994
},
"TrainerController.start_learning": {
"total": 1275.9527051,
"count": 1,
"self": 0.6186606999947344,
"children": {
"TrainerController._reset_env": {
"total": 2.690172300000023,
"count": 3,
"self": 2.690172300000023
},
"TrainerController.advance": {
"total": 1272.546704200005,
"count": 32889,
"self": 0.5884741000011218,
"children": {
"env_step": {
"total": 435.67752660000787,
"count": 32889,
"self": 333.32855860001894,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.95911109998018,
"count": 32889,
"self": 3.4298776999974905,
"children": {
"TorchPolicy.evaluate": {
"total": 98.52923339998269,
"count": 65302,
"self": 98.52923339998269
}
}
},
"workers": {
"total": 0.38985690000872975,
"count": 32889,
"self": 0.0,
"children": {
"worker_root": {
"total": 1272.197441199986,
"count": 32889,
"is_parallel": true,
"self": 1013.072171399976,
"children": {
"steps_from_proto": {
"total": 0.0047829999999291495,
"count": 6,
"is_parallel": true,
"self": 0.0009274999999475675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003855499999981582,
"count": 24,
"is_parallel": true,
"self": 0.003855499999981582
}
}
},
"UnityEnvironment.step": {
"total": 259.1204868000101,
"count": 32889,
"is_parallel": true,
"self": 14.351791900007441,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.451771100007303,
"count": 32889,
"is_parallel": true,
"self": 12.451771100007303
},
"communicator.exchange": {
"total": 188.19545779999288,
"count": 32889,
"is_parallel": true,
"self": 188.19545779999288
},
"steps_from_proto": {
"total": 44.12146600000247,
"count": 65778,
"is_parallel": true,
"self": 8.607366199996093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.51409980000638,
"count": 263112,
"is_parallel": true,
"self": 35.51409980000638
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 836.2807034999961,
"count": 32889,
"self": 5.208988699994734,
"children": {
"process_trajectory": {
"total": 109.62950250000156,
"count": 32889,
"self": 109.50363830000177,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12586419999979626,
"count": 1,
"self": 0.12586419999979626
}
}
},
"_update_policy": {
"total": 721.4422122999998,
"count": 23,
"self": 60.46994850000101,
"children": {
"TorchPOCAOptimizer.update": {
"total": 660.9722637999988,
"count": 690,
"self": 660.9722637999988
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09716790000015862,
"count": 1,
"self": 3.240000023652101e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0971354999999221,
"count": 1,
"self": 0.0971354999999221
}
}
}
}
}
}
}