poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.182844638824463,
"min": 3.181525707244873,
"max": 3.2957041263580322,
"count": 80
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 50518.109375,
"min": 29227.0078125,
"max": 127586.9921875,
"count": 80
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 818.1428571428571,
"min": 476.0,
"max": 999.0,
"count": 80
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 22908.0,
"min": 16696.0,
"max": 23576.0,
"count": 80
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1206.8371049762618,
"min": 1195.8345822748809,
"max": 1206.8371049762618,
"count": 66
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4827.348419905047,
"min": 2392.1692017894784,
"max": 14389.142808817227,
"count": 66
},
"SoccerTwos.Step.mean": {
"value": 799638.0,
"min": 9808.0,
"max": 799638.0,
"count": 80
},
"SoccerTwos.Step.sum": {
"value": 799638.0,
"min": 9808.0,
"max": 799638.0,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0033227666281163692,
"min": -0.02474234253168106,
"max": 0.007317614741623402,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.04319596663117409,
"min": -0.4924280345439911,
"max": 0.1097642183303833,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0036345445550978184,
"min": -0.0267522931098938,
"max": 0.006356476806104183,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.04724907875061035,
"min": -0.5386800169944763,
"max": 0.08899067342281342,
"count": 80
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 80
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.2242461511721978,
"min": -0.5384615384615384,
"max": 0.33637333114941914,
"count": 80
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 2.915199965238571,
"min": -7.0,
"max": 5.045599967241287,
"count": 80
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.2242461511721978,
"min": -0.5384615384615384,
"max": 0.33637333114941914,
"count": 80
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 2.915199965238571,
"min": -7.0,
"max": 5.045599967241287,
"count": 80
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.022476562972102935,
"min": 0.01271723781634743,
"max": 0.023505337241416176,
"count": 37
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.022476562972102935,
"min": 0.01271723781634743,
"max": 0.023505337241416176,
"count": 37
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00233484919105346,
"min": 9.062712160812226e-06,
"max": 0.004905329699007173,
"count": 37
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.00233484919105346,
"min": 9.062712160812226e-06,
"max": 0.004905329699007173,
"count": 37
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.002335729360735665,
"min": 7.6276505448428605e-06,
"max": 0.004691832458289961,
"count": 37
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.002335729360735665,
"min": 7.6276505448428605e-06,
"max": 0.004691832458289961,
"count": 37
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 37
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 37
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 37
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 37
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 37
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682345910",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "G:\\Anaconda\\envs\\Rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos_1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1682348216"
},
"total": 2306.6411957,
"count": 1,
"self": 0.21903269999984332,
"children": {
"run_training.setup": {
"total": 0.10491850000000014,
"count": 1,
"self": 0.10491850000000014
},
"TrainerController.start_learning": {
"total": 2306.3172445,
"count": 1,
"self": 1.1632733000010376,
"children": {
"TrainerController._reset_env": {
"total": 5.151768800000344,
"count": 5,
"self": 5.151768800000344
},
"TrainerController.advance": {
"total": 2299.5340294999983,
"count": 52189,
"self": 1.1959380000098463,
"children": {
"env_step": {
"total": 1062.8713237999702,
"count": 52189,
"self": 866.2638923999364,
"children": {
"SubprocessEnvManager._take_step": {
"total": 195.85483810001034,
"count": 52189,
"self": 6.361062900043237,
"children": {
"TorchPolicy.evaluate": {
"total": 189.4937751999671,
"count": 103616,
"self": 189.4937751999671
}
}
},
"workers": {
"total": 0.7525933000235829,
"count": 52188,
"self": 0.0,
"children": {
"worker_root": {
"total": 2299.4098572000007,
"count": 52188,
"is_parallel": true,
"self": 1577.947976400028,
"children": {
"steps_from_proto": {
"total": 0.010773600000544725,
"count": 10,
"is_parallel": true,
"self": 0.0023418000004022943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008431800000142431,
"count": 40,
"is_parallel": true,
"self": 0.008431800000142431
}
}
},
"UnityEnvironment.step": {
"total": 721.4511071999723,
"count": 52188,
"is_parallel": true,
"self": 26.85214269995356,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.98405239998881,
"count": 52188,
"is_parallel": true,
"self": 22.98405239998881
},
"communicator.exchange": {
"total": 587.4680631000268,
"count": 52188,
"is_parallel": true,
"self": 587.4680631000268
},
"steps_from_proto": {
"total": 84.14684900000302,
"count": 104376,
"is_parallel": true,
"self": 18.008921700008116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.1379272999949,
"count": 417504,
"is_parallel": true,
"self": 66.1379272999949
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1235.4667677000182,
"count": 52188,
"self": 9.229053200004728,
"children": {
"process_trajectory": {
"total": 165.0186984000129,
"count": 52188,
"self": 162.9847335000129,
"children": {
"RLTrainer._checkpoint": {
"total": 2.033964900000001,
"count": 1,
"self": 2.033964900000001
}
}
},
"_update_policy": {
"total": 1061.2190161000005,
"count": 37,
"self": 130.1017159000046,
"children": {
"TorchPOCAOptimizer.update": {
"total": 931.117300199996,
"count": 1110,
"self": 931.117300199996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.4681718000001638,
"count": 1,
"self": 0.029122600000391685,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4390491999997721,
"count": 1,
"self": 0.4390491999997721
}
}
}
}
}
}
}