poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2957217693328857,
"min": 3.2957217693328857,
"max": 3.2957217693328857,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 105463.09375,
"min": 105463.09375,
"max": 105463.09375,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 756.625,
"min": 756.625,
"max": 756.625,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 24212.0,
"min": 24212.0,
"max": 24212.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.234955112457,
"min": 1202.234955112457,
"max": 1202.234955112457,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9617.879640899657,
"min": 9617.879640899657,
"max": 9617.879640899657,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9122.0,
"min": 9122.0,
"max": 9122.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9122.0,
"min": 9122.0,
"max": 9122.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.10343889892101288,
"min": -0.10343889892101288,
"max": -0.10343889892101288,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.3447057008743286,
"min": -1.3447057008743286,
"max": -1.3447057008743286,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10348843038082123,
"min": -0.10348843038082123,
"max": -0.10348843038082123,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.3453495502471924,
"min": -1.3453495502471924,
"max": -1.3453495502471924,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.29861539143782395,
"min": 0.29861539143782395,
"max": 0.29861539143782395,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.8820000886917114,
"min": 3.8820000886917114,
"max": 3.8820000886917114,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.29861539143782395,
"min": 0.29861539143782395,
"max": 0.29861539143782395,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.8820000886917114,
"min": 3.8820000886917114,
"max": 3.8820000886917114,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694074756",
"python_version": "3.9.18 (main, Aug 24 2023, 21:20:15) \n[Clang 14.0.0 (clang-1400.0.29.202)]",
"command_line_arguments": "/Users/cikriibr/projects/learning/deepRL/env/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1694074809"
},
"total": 53.066556375000005,
"count": 1,
"self": 0.28110941700000325,
"children": {
"run_training.setup": {
"total": 0.030046958000000012,
"count": 1,
"self": 0.030046958000000012
},
"TrainerController.start_learning": {
"total": 52.7554,
"count": 1,
"self": 0.012068182999939836,
"children": {
"TrainerController._reset_env": {
"total": 1.694077167,
"count": 1,
"self": 1.694077167
},
"TrainerController.advance": {
"total": 50.90541894100006,
"count": 1004,
"self": 0.01212669900018426,
"children": {
"env_step": {
"total": 49.66604145199994,
"count": 1004,
"self": 48.0027904279998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1.6545578100000276,
"count": 1004,
"self": 0.045994196000025855,
"children": {
"TorchPolicy.evaluate": {
"total": 1.6085636140000017,
"count": 2000,
"self": 1.6085636140000017
}
}
},
"workers": {
"total": 0.008693214000111027,
"count": 1004,
"self": 0.0,
"children": {
"worker_root": {
"total": 50.13892670999993,
"count": 1004,
"is_parallel": true,
"self": 3.555963091999992,
"children": {
"steps_from_proto": {
"total": 0.0014188749999999306,
"count": 2,
"is_parallel": true,
"self": 0.0002449999999993846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001173875000000546,
"count": 8,
"is_parallel": true,
"self": 0.001173875000000546
}
}
},
"UnityEnvironment.step": {
"total": 46.58154474299994,
"count": 1004,
"is_parallel": true,
"self": 0.12588737199990874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.7080112539999688,
"count": 1004,
"is_parallel": true,
"self": 0.7080112539999688
},
"communicator.exchange": {
"total": 44.389486800000086,
"count": 1004,
"is_parallel": true,
"self": 44.389486800000086
},
"steps_from_proto": {
"total": 1.3581593169999737,
"count": 2008,
"is_parallel": true,
"self": 0.19286627000009937,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.1652930469998743,
"count": 8032,
"is_parallel": true,
"self": 1.1652930469998743
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1.227250789999938,
"count": 1004,
"self": 0.0674553749999598,
"children": {
"process_trajectory": {
"total": 1.1597954149999783,
"count": 1004,
"self": 1.1597954149999783
}
}
}
}
},
"trainer_threads": {
"total": 4.170000025283116e-07,
"count": 1,
"self": 4.170000025283116e-07
},
"TrainerController._save_models": {
"total": 0.14383529199999856,
"count": 1,
"self": 0.0480642919999994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09577099999999916,
"count": 1,
"self": 0.09577099999999916
}
}
}
}
}
}
}
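A minimal sketch of how this dump could be inspected, assuming it is saved locally under the path shown above (run_logs/timers.json). It relies only on the structure visible in this file: each timer node carries "total", "count", an optional "self", and optional nested "children".

import json

def print_timers(node, name="root", depth=0):
    # Recursively print each timer's total wall-clock seconds and call count.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f} s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    data = json.load(f)

print_timers(data)

For this run, the sketch would show that almost all of the ~53 s total sits under TrainerController.start_learning, dominated by env_step and the communicator.exchange call inside UnityEnvironment.step.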