{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2623162269592285,
"min": 3.2615444660186768,
"max": 3.2957561016082764,
"count": 16
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 68691.328125,
"min": 40394.92578125,
"max": 105464.1875,
"count": 16
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 601.7777777777778,
"min": 574.4,
"max": 999.0,
"count": 16
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21664.0,
"min": 11488.0,
"max": 28988.0,
"count": 16
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1197.3408982701842,
"min": 1197.3408982701842,
"max": 1203.8175274476016,
"count": 15
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 14368.090779242211,
"min": 2396.1535795422496,
"max": 14368.090779242211,
"count": 15
},
"SoccerTwos.Step.mean": {
"value": 159824.0,
"min": 9512.0,
"max": 159824.0,
"count": 16
},
"SoccerTwos.Step.sum": {
"value": 159824.0,
"min": 9512.0,
"max": 159824.0,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.026351574808359146,
"min": -0.05036341771483421,
"max": -0.026351574808359146,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.4479767680168152,
"min": -0.7428839206695557,
"max": -0.3202369809150696,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.025530071929097176,
"min": -0.050388529896736145,
"max": -0.025530071929097176,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.43401122093200684,
"min": -0.7053990960121155,
"max": -0.34283724427223206,
"count": 16
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 16
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.05778823179357192,
"min": -0.3965142858879907,
"max": 0.2683384601886456,
"count": 16
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.9823999404907227,
"min": -5.5512000024318695,
"max": 3.4883999824523926,
"count": 16
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.05778823179357192,
"min": -0.3965142858879907,
"max": 0.2683384601886456,
"count": 16
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.9823999404907227,
"min": -5.5512000024318695,
"max": 3.4883999824523926,
"count": 16
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016798803419806062,
"min": 0.013983288051288885,
"max": 0.019427612053308015,
"count": 7
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016798803419806062,
"min": 0.013983288051288885,
"max": 0.019427612053308015,
"count": 7
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003357618247779707,
"min": 0.0016571837554996212,
"max": 0.0046502520640691125,
"count": 7
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003357618247779707,
"min": 0.0016571837554996212,
"max": 0.0046502520640691125,
"count": 7
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0033834061313730976,
"min": 0.0017213587920802335,
"max": 0.004764475013750295,
"count": 7
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0033834061313730976,
"min": 0.0017213587920802335,
"max": 0.004764475013750295,
"count": 7
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 7
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 7
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 7
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 7
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 7
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685065634",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\PC-Admin\\Anaconda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1685073174"
},
"total": 7540.803681587,
"count": 1,
"self": 0.11845842399907269,
"children": {
"run_training.setup": {
"total": 3.098007620000004,
"count": 1,
"self": 3.098007620000004
},
"TrainerController.start_learning": {
"total": 7537.587215543001,
"count": 1,
"self": 0.819069577953087,
"children": {
"TrainerController._reset_env": {
"total": 92.219704239,
"count": 1,
"self": 92.219704239
},
"TrainerController.advance": {
"total": 7433.0073113540475,
"count": 10834,
"self": 0.8331230440899162,
"children": {
"env_step": {
"total": 6008.036598750998,
"count": 10834,
"self": 5817.722961007964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 189.78471672804437,
"count": 10834,
"self": 6.030594909089899,
"children": {
"TorchPolicy.evaluate": {
"total": 183.75412181895447,
"count": 21504,
"self": 183.75412181895447
}
}
},
"workers": {
"total": 0.5289210149895496,
"count": 10833,
"self": 0.0,
"children": {
"worker_root": {
"total": 7434.3655564799865,
"count": 10833,
"is_parallel": true,
"self": 1727.325292816975,
"children": {
"steps_from_proto": {
"total": 0.02151822899999445,
"count": 2,
"is_parallel": true,
"self": 0.002809094999975059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01870913400001939,
"count": 8,
"is_parallel": true,
"self": 0.01870913400001939
}
}
},
"UnityEnvironment.step": {
"total": 5707.018745434011,
"count": 10833,
"is_parallel": true,
"self": 10.761603176963035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 122.15321961302764,
"count": 10833,
"is_parallel": true,
"self": 122.15321961302764
},
"communicator.exchange": {
"total": 5369.75882440797,
"count": 10833,
"is_parallel": true,
"self": 5369.75882440797
},
"steps_from_proto": {
"total": 204.34509823605106,
"count": 21666,
"is_parallel": true,
"self": 23.157507915941096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 181.18759032010996,
"count": 86664,
"is_parallel": true,
"self": 181.18759032010996
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1424.1375895589601,
"count": 10833,
"self": 4.837865171013618,
"children": {
"process_trajectory": {
"total": 196.17368740894636,
"count": 10833,
"self": 196.17368740894636
},
"_update_policy": {
"total": 1223.1260369790002,
"count": 7,
"self": 103.8595243900022,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1119.266512588998,
"count": 210,
"self": 1119.266512588998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9740000425372273e-06,
"count": 1,
"self": 1.9740000425372273e-06
},
"TrainerController._save_models": {
"total": 11.541128398000183,
"count": 1,
"self": 6.315999962680507e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 11.541065238000556,
"count": 1,
"self": 11.541065238000556
}
}
}
}
}
}
}