{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9926632642745972,
"min": 1.7032517194747925,
"max": 3.2956745624542236,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 20532.40234375,
"min": 16424.9609375,
"max": 120260.3125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 389.8666666666667,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 11804.0,
"max": 27508.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1190.4112588948528,
"min": 1185.9612614308237,
"max": 1204.606750676724,
"count": 163
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2380.8225177897057,
"min": 2371.9225228616474,
"max": 23792.67423685981,
"count": 163
},
"SoccerTwos.Step.mean": {
"value": 9999020.0,
"min": 9178.0,
"max": 9999020.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999020.0,
"min": 9178.0,
"max": 9999020.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -9.627002873457968e-05,
"min": -0.02888946793973446,
"max": 0.009360288269817829,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.0009627002873457968,
"min": -0.42032384872436523,
"max": 0.13703712821006775,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -9.633601439418271e-05,
"min": -0.027621883898973465,
"max": 0.009541833773255348,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.0009633601293899119,
"min": -0.4192490577697754,
"max": 0.13236786425113678,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6666666666666666,
"max": 0.2942769252336942,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -10.393200010061264,
"max": 3.825600028038025,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6666666666666666,
"max": 0.2942769252336942,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -10.393200010061264,
"max": 3.825600028038025,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018380161248690758,
"min": 0.010514204100278828,
"max": 0.024833333554367225,
"count": 458
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018380161248690758,
"min": 0.010514204100278828,
"max": 0.024833333554367225,
"count": 458
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 1.3376915704762146e-09,
"min": 5.053695480613148e-10,
"max": 0.005721124960109592,
"count": 458
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 1.3376915704762146e-09,
"min": 5.053695480613148e-10,
"max": 0.005721124960109592,
"count": 458
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 1.3802510347623217e-09,
"min": 5.668687306596236e-10,
"max": 0.00604997156187892,
"count": 458
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 1.3802510347623217e-09,
"min": 5.668687306596236e-10,
"max": 0.00604997156187892,
"count": 458
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 458
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 458
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 458
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 458
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 458
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 458
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685284538",
"python_version": "3.10.6 (main, Oct 24 2022, 16:07:47) [GCC 11.2.0]",
"command_line_arguments": "/home/abhishek/anaconda3/envs/swasti_icra_rush/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos_1 --force --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685335792"
},
"total": 51254.06159239,
"count": 1,
"self": 0.0951508040088811,
"children": {
"run_training.setup": {
"total": 0.03154678299688385,
"count": 1,
"self": 0.03154678299688385
},
"TrainerController.start_learning": {
"total": 51253.934894802995,
"count": 1,
"self": 18.824933010415407,
"children": {
"TrainerController._reset_env": {
"total": 8.698706835992198,
"count": 50,
"self": 8.698706835992198
},
"TrainerController.advance": {
"total": 51225.97774020459,
"count": 650259,
"self": 20.495995277859038,
"children": {
"env_step": {
"total": 20392.102711727694,
"count": 650259,
"self": 16617.968157610474,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3761.3976993648103,
"count": 650259,
"self": 162.75352947525244,
"children": {
"TorchPolicy.evaluate": {
"total": 3598.644169889558,
"count": 1292360,
"self": 3598.644169889558
}
}
},
"workers": {
"total": 12.736854752409272,
"count": 650258,
"self": 0.0,
"children": {
"worker_root": {
"total": 51213.508485654624,
"count": 650258,
"is_parallel": true,
"self": 37263.78115457906,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005483104003360495,
"count": 2,
"is_parallel": true,
"self": 0.0014108680006756913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004072236002684804,
"count": 8,
"is_parallel": true,
"self": 0.004072236002684804
}
}
},
"UnityEnvironment.step": {
"total": 0.04317376000108197,
"count": 1,
"is_parallel": true,
"self": 0.001116396004363196,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0011059059979743324,
"count": 1,
"is_parallel": true,
"self": 0.0011059059979743324
},
"communicator.exchange": {
"total": 0.03825828800108866,
"count": 1,
"is_parallel": true,
"self": 0.03825828800108866
},
"steps_from_proto": {
"total": 0.0026931699976557866,
"count": 2,
"is_parallel": true,
"self": 0.0005549960005737375,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002138173997082049,
"count": 8,
"is_parallel": true,
"self": 0.002138173997082049
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 13949.54303881458,
"count": 650257,
"is_parallel": true,
"self": 918.1807506679324,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 618.407143049888,
"count": 650257,
"is_parallel": true,
"self": 618.407143049888
},
"communicator.exchange": {
"total": 10012.455762370046,
"count": 650257,
"is_parallel": true,
"self": 10012.455762370046
},
"steps_from_proto": {
"total": 2400.4993827267135,
"count": 1300514,
"is_parallel": true,
"self": 455.66124329324157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1944.838139433472,
"count": 5202056,
"is_parallel": true,
"self": 1944.838139433472
}
}
}
}
},
"steps_from_proto": {
"total": 0.18429226098305662,
"count": 98,
"is_parallel": true,
"self": 0.03483632390634739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.14945593707670923,
"count": 392,
"is_parallel": true,
"self": 0.14945593707670923
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 30813.37903319904,
"count": 650258,
"self": 181.0912137884261,
"children": {
"process_trajectory": {
"total": 3993.1759212116267,
"count": 650258,
"self": 3987.388774043633,
"children": {
"RLTrainer._checkpoint": {
"total": 5.78714716799368,
"count": 20,
"self": 5.78714716799368
}
}
},
"_update_policy": {
"total": 26639.111898198986,
"count": 458,
"self": 1968.3501102650953,
"children": {
"TorchPOCAOptimizer.update": {
"total": 24670.76178793389,
"count": 13746,
"self": 24670.76178793389
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0840088836848736e-06,
"count": 1,
"self": 2.0840088836848736e-06
},
"TrainerController._save_models": {
"total": 0.43351266799436416,
"count": 1,
"self": 0.0040231279854197055,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42948954000894446,
"count": 1,
"self": 0.42948954000894446
}
}
}
}
}
}
}
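
Note: the block above is the ML-Agents timers log for this training run (timer_format_version 0.1.0, mlagents 0.31.0.dev0). Below is a minimal sketch of how the gauge summaries could be inspected with the Python standard library; the local path "run_logs/timers.json" is an assumption about where this file is saved, not something the log itself specifies.

import json

# Hypothetical local path; adjust to wherever this file was downloaded.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus the min/max and update count seen during training.
for name, gauge in timers["gauges"].items():
    print(f'{name}: value={gauge["value"]:.4f} '
          f'(min={gauge["min"]:.4f}, max={gauge["max"]:.4f}, n={gauge["count"]})')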