{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2531075477600098,
"min": 2.205596923828125,
"max": 3.2148594856262207,
"count": 393
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44485.35546875,
"min": 21093.712890625,
"max": 175992.0,
"count": 393
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.333333333333336,
"min": 42.55263157894737,
"max": 999.0,
"count": 393
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19560.0,
"min": 3996.0,
"max": 32648.0,
"count": 393
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1450.3141492791954,
"min": 1183.1308426287494,
"max": 1458.2502183512165,
"count": 376
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 261056.54687025517,
"min": 2382.429530164287,
"max": 331018.88755897584,
"count": 376
},
"SoccerTwos.Step.mean": {
"value": 4999989.0,
"min": 1069006.0,
"max": 4999989.0,
"count": 394
},
"SoccerTwos.Step.sum": {
"value": 4999989.0,
"min": 1069006.0,
"max": 4999989.0,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.06466507166624069,
"min": -0.045848406851291656,
"max": 0.22180138528347015,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 11.6397123336792,
"min": -6.60572624206543,
"max": 34.379215240478516,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.06135227531194687,
"min": -0.046617619693279266,
"max": 0.2225351333618164,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 11.04340934753418,
"min": -7.996595859527588,
"max": 34.49294662475586,
"count": 394
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 394
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.01482444273100959,
"min": -0.6666666666666666,
"max": 0.42734762210221516,
"count": 394
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 2.668399691581726,
"min": -37.52600026130676,
"max": 71.35640001296997,
"count": 394
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.01482444273100959,
"min": -0.6666666666666666,
"max": 0.42734762210221516,
"count": 394
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 2.668399691581726,
"min": -37.52600026130676,
"max": 71.35640001296997,
"count": 394
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 394
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 394
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015964625528431496,
"min": 0.011339024729871501,
"max": 0.022405783160744857,
"count": 187
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015964625528431496,
"min": 0.011339024729871501,
"max": 0.022405783160744857,
"count": 187
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10514686654011408,
"min": 0.00021651945450381997,
"max": 0.10809014067053795,
"count": 187
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10514686654011408,
"min": 0.00021651945450381997,
"max": 0.10809014067053795,
"count": 187
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.107951819896698,
"min": 0.0002174987540153476,
"max": 0.1108481079339981,
"count": 187
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.107951819896698,
"min": 0.0002174987540153476,
"max": 0.1108481079339981,
"count": 187
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 187
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 187
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 187
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 187
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 187
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 187
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675414378",
"python_version": "3.8.16 (default, Jan 17 2023, 23:13:24) \n[GCC 11.2.0]",
"command_line_arguments": "/home/emigoi/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTows.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675423513"
},
"total": 9134.877508399999,
"count": 1,
"self": 0.5058795000004466,
"children": {
"run_training.setup": {
"total": 0.01188659999999686,
"count": 1,
"self": 0.01188659999999686
},
"TrainerController.start_learning": {
"total": 9134.3597423,
"count": 1,
"self": 4.736177900183975,
"children": {
"TrainerController._reset_env": {
"total": 6.598423800003019,
"count": 41,
"self": 6.598423800003019
},
"TrainerController.advance": {
"total": 9122.680936199813,
"count": 267346,
"self": 4.950059300006615,
"children": {
"env_step": {
"total": 7276.087199399884,
"count": 267346,
"self": 4864.879236099585,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2408.223233100152,
"count": 267346,
"self": 32.16540580033279,
"children": {
"TorchPolicy.evaluate": {
"total": 2376.057827299819,
"count": 509968,
"self": 2376.057827299819
}
}
},
"workers": {
"total": 2.9847302001481566,
"count": 267346,
"self": 0.0,
"children": {
"worker_root": {
"total": 9120.953405599965,
"count": 267346,
"is_parallel": true,
"self": 4811.26172639982,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026335000000017317,
"count": 2,
"is_parallel": true,
"self": 0.0011161000000043941,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015173999999973375,
"count": 8,
"is_parallel": true,
"self": 0.0015173999999973375
}
}
},
"UnityEnvironment.step": {
"total": 0.02836279999999647,
"count": 1,
"is_parallel": true,
"self": 0.0004635000000021705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038879999999608117,
"count": 1,
"is_parallel": true,
"self": 0.00038879999999608117
},
"communicator.exchange": {
"total": 0.026017500000001803,
"count": 1,
"is_parallel": true,
"self": 0.026017500000001803
},
"steps_from_proto": {
"total": 0.0014929999999964139,
"count": 2,
"is_parallel": true,
"self": 0.00032689999999746533,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011660999999989485,
"count": 8,
"is_parallel": true,
"self": 0.0011660999999989485
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.06119719999665563,
"count": 80,
"is_parallel": true,
"self": 0.011537400000626974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04965979999602865,
"count": 320,
"is_parallel": true,
"self": 0.04965979999602865
}
}
},
"UnityEnvironment.step": {
"total": 4309.630482000148,
"count": 267345,
"is_parallel": true,
"self": 123.45304180037056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.1560935000251,
"count": 267345,
"is_parallel": true,
"self": 89.1560935000251
},
"communicator.exchange": {
"total": 3687.05164360012,
"count": 267345,
"is_parallel": true,
"self": 3687.05164360012
},
"steps_from_proto": {
"total": 409.96970309963245,
"count": 534690,
"is_parallel": true,
"self": 79.19082019962849,
"children": {
"_process_rank_one_or_two_observation": {
"total": 330.77888290000396,
"count": 2138760,
"is_parallel": true,
"self": 330.77888290000396
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1841.643677499922,
"count": 267346,
"self": 29.870324400264508,
"children": {
"process_trajectory": {
"total": 673.5833846996571,
"count": 267346,
"self": 672.0646403996572,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5187442999999803,
"count": 8,
"self": 1.5187442999999803
}
}
},
"_update_policy": {
"total": 1138.1899684000002,
"count": 187,
"self": 435.380598200015,
"children": {
"TorchPOCAOptimizer.update": {
"total": 702.8093701999852,
"count": 5628,
"self": 702.8093701999852
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.999991117278114e-07,
"count": 1,
"self": 5.999991117278114e-07
},
"TrainerController._save_models": {
"total": 0.3442037999993772,
"count": 1,
"self": 0.001438600000255974,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34276519999912125,
"count": 1,
"self": 0.34276519999912125
}
}
}
}
}
}
}