{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.7609968185424805,
"min": 2.759597063064575,
"max": 3.2947139739990234,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53011.140625,
"min": 23319.828125,
"max": 130506.2890625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.34210526315789,
"min": 60.525,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19560.0,
"min": 3996.0,
"max": 30808.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1426.7027287832227,
"min": 1198.430818454253,
"max": 1458.5056356146274,
"count": 492
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 216858.81477504986,
"min": 2401.4985608882907,
"max": 229114.54091223533,
"count": 492
},
"SoccerTwos.Step.mean": {
"value": 4999968.0,
"min": 9420.0,
"max": 4999968.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999968.0,
"min": 9420.0,
"max": 4999968.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0476294569671154,
"min": -0.1635277420282364,
"max": 0.1583396941423416,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.287306785583496,
"min": -18.06836700439453,
"max": 22.167556762695312,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.044529274106025696,
"min": -0.19693352282047272,
"max": 0.1574898660182953,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.812979221343994,
"min": -18.4958553314209,
"max": 22.048582077026367,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.10239608147565056,
"min": -0.625,
"max": 0.38594666719436643,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 15.666600465774536,
"min": -58.52519977092743,
"max": 49.21760034561157,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.10239608147565056,
"min": -0.625,
"max": 0.38594666719436643,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 15.666600465774536,
"min": -58.52519977092743,
"max": 49.21760034561157,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.11955090509727598,
"min": 0.0549288809299469,
"max": 0.15605423919235667,
"count": 295
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.11955090509727598,
"min": 0.0549288809299469,
"max": 0.15605423919235667,
"count": 295
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07651706002652645,
"min": 0.0008761773497099057,
"max": 0.0770426332950592,
"count": 295
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07651706002652645,
"min": 0.0008761773497099057,
"max": 0.0770426332950592,
"count": 295
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07690190883974234,
"min": 0.0009618056486942805,
"max": 0.07727048384646575,
"count": 295
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07690190883974234,
"min": 0.0009618056486942805,
"max": 0.07727048384646575,
"count": 295
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.8909994219998365e-07,
"min": 2.8909994219998365e-07,
"max": 0.0004970604005879202,
"count": 295
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.8909994219998365e-07,
"min": 2.8909994219998365e-07,
"max": 0.0004970604005879202,
"count": 295
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10005780000000003,
"min": 0.10005780000000003,
"max": 0.19941208000000002,
"count": 295
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10005780000000003,
"min": 0.10005780000000003,
"max": 0.19941208000000002,
"count": 295
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 295
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 295
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676281927",
"python_version": "3.10.8 (tags/v3.10.8:aaaf517, Oct 11 2022, 16:50:30) [MSC v.1933 64 bit (AMD64)]",
"command_line_arguments": "C:\\_venv\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos_v1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.23.5",
"end_time_seconds": "1676293444"
},
"total": 11517.50440370012,
"count": 1,
"self": 0.30014590080827475,
"children": {
"run_training.setup": {
"total": 0.12665479956194758,
"count": 1,
"self": 0.12665479956194758
},
"TrainerController.start_learning": {
"total": 11517.07760299975,
"count": 1,
"self": 8.454678062349558,
"children": {
"TrainerController._reset_env": {
"total": 6.790920899249613,
"count": 25,
"self": 6.790920899249613
},
"TrainerController.advance": {
"total": 11501.488495837897,
"count": 332189,
"self": 8.324804423376918,
"children": {
"env_step": {
"total": 7924.832759073004,
"count": 332189,
"self": 4662.1909966696985,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3257.769733860623,
"count": 332189,
"self": 67.06523498380557,
"children": {
"TorchPolicy.evaluate": {
"total": 3190.7044988768175,
"count": 634528,
"self": 3190.7044988768175
}
}
},
"workers": {
"total": 4.8720285426825285,
"count": 332189,
"self": 0.0,
"children": {
"worker_root": {
"total": 11500.661082218867,
"count": 332189,
"is_parallel": true,
"self": 7801.927220531274,
"children": {
"steps_from_proto": {
"total": 0.045304300263524055,
"count": 50,
"is_parallel": true,
"self": 0.009331699926406145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03597260033711791,
"count": 200,
"is_parallel": true,
"self": 0.03597260033711791
}
}
},
"UnityEnvironment.step": {
"total": 3698.6885573873296,
"count": 332189,
"is_parallel": true,
"self": 190.02568586403504,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 144.70981773221865,
"count": 332189,
"is_parallel": true,
"self": 144.70981773221865
},
"communicator.exchange": {
"total": 2766.456783823669,
"count": 332189,
"is_parallel": true,
"self": 2766.456783823669
},
"steps_from_proto": {
"total": 597.496269967407,
"count": 664378,
"is_parallel": true,
"self": 122.68107999535277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 474.8151899720542,
"count": 2657512,
"is_parallel": true,
"self": 474.8151899720542
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3568.330932341516,
"count": 332189,
"self": 65.08589063258842,
"children": {
"process_trajectory": {
"total": 2240.8907405100763,
"count": 332189,
"self": 2237.5077686090954,
"children": {
"RLTrainer._checkpoint": {
"total": 3.3829719009809196,
"count": 10,
"self": 3.3829719009809196
}
}
},
"_update_policy": {
"total": 1262.3543011988513,
"count": 295,
"self": 116.87342757498845,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1145.4808736238629,
"count": 8862,
"self": 1145.4808736238629
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.400243490934372e-06,
"count": 1,
"self": 1.400243490934372e-06
},
"TrainerController._save_models": {
"total": 0.34350680001080036,
"count": 1,
"self": 0.050042600370943546,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2934641996398568,
"count": 1,
"self": 0.2934641996398568
}
}
}
}
}
}
}