{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2813422679901123,
"min": 3.2797603607177734,
"max": 3.2813422679901123,
"count": 3
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 69406.953125,
"min": 35578.83984375,
"max": 104975.2421875,
"count": 3
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 984.5,
"min": 459.0,
"max": 984.5,
"count": 3
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 23628.0,
"min": 3672.0,
"max": 28916.0,
"count": 3
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1201.222288407967,
"min": 1201.222288407967,
"max": 1202.4914404208353,
"count": 3
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2402.444576815934,
"min": 2402.444576815934,
"max": 7209.071703418774,
"count": 3
},
"SoccerTwos.Step.mean": {
"value": 59706.0,
"min": 39040.0,
"max": 59706.0,
"count": 3
},
"SoccerTwos.Step.sum": {
"value": 59706.0,
"min": 39040.0,
"max": 59706.0,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0251691285520792,
"min": 0.0251691285520792,
"max": 0.027628587558865547,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.27686041593551636,
"min": 0.21094191074371338,
"max": 0.33154305815696716,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.019476601853966713,
"min": 0.019476601853966713,
"max": 0.02308989316225052,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.214242622256279,
"min": 0.1697760671377182,
"max": 0.2770787179470062,
"count": 3
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.36363636363636365,
"min": -0.36363636363636365,
"max": 0.11886666218439738,
"count": 3
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.0,
"min": -4.0,
"max": 1.4263999462127686,
"count": 3
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.36363636363636365,
"min": -0.36363636363636365,
"max": 0.11886666218439738,
"count": 3
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.0,
"min": -4.0,
"max": 1.4263999462127686,
"count": 3
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1734349453",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./results/SoccerTwosNew/configuration.yaml --env=train-soccer/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwosNew --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1734349617"
},
"total": 163.16620889400002,
"count": 1,
"self": 0.009594704000022602,
"children": {
"run_training.setup": {
"total": 0.10617190499999651,
"count": 1,
"self": 0.10617190499999651
},
"TrainerController.start_learning": {
"total": 163.050442285,
"count": 1,
"self": 0.08201455699901317,
"children": {
"TrainerController._reset_env": {
"total": 6.299881440000007,
"count": 1,
"self": 6.299881440000007
},
"TrainerController.advance": {
"total": 156.383009999001,
"count": 2004,
"self": 0.07024006500196833,
"children": {
"env_step": {
"total": 64.22660732399879,
"count": 2004,
"self": 52.391058236997,
"children": {
"SubprocessEnvManager._take_step": {
"total": 11.798904964001565,
"count": 2004,
"self": 0.38294599499960214,
"children": {
"TorchPolicy.evaluate": {
"total": 11.415958969001963,
"count": 4000,
"self": 11.415958969001963
}
}
},
"workers": {
"total": 0.036644123000229456,
"count": 2004,
"self": 0.0,
"children": {
"worker_root": {
"total": 80.18677688799966,
"count": 2004,
"is_parallel": true,
"self": 36.25111745799899,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007672471999995878,
"count": 2,
"is_parallel": true,
"self": 0.0018231199999831915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005849352000012686,
"count": 8,
"is_parallel": true,
"self": 0.005849352000012686
}
}
},
"UnityEnvironment.step": {
"total": 0.0461561890000155,
"count": 1,
"is_parallel": true,
"self": 0.0013367729999913536,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009743020000030356,
"count": 1,
"is_parallel": true,
"self": 0.0009743020000030356
},
"communicator.exchange": {
"total": 0.03982881500002122,
"count": 1,
"is_parallel": true,
"self": 0.03982881500002122
},
"steps_from_proto": {
"total": 0.00401629899999989,
"count": 2,
"is_parallel": true,
"self": 0.000687369999980092,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003328929000019798,
"count": 8,
"is_parallel": true,
"self": 0.003328929000019798
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 43.93565943000067,
"count": 2003,
"is_parallel": true,
"self": 2.6245066150007403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.7792221570002482,
"count": 2003,
"is_parallel": true,
"self": 1.7792221570002482
},
"communicator.exchange": {
"total": 31.125804646999285,
"count": 2003,
"is_parallel": true,
"self": 31.125804646999285
},
"steps_from_proto": {
"total": 8.406126011000396,
"count": 4006,
"is_parallel": true,
"self": 1.4179039570013856,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.98822205399901,
"count": 16024,
"is_parallel": true,
"self": 6.98822205399901
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 92.08616261000023,
"count": 2004,
"self": 0.3026151499994967,
"children": {
"process_trajectory": {
"total": 13.09741929400073,
"count": 2004,
"self": 13.09741929400073
},
"_update_policy": {
"total": 78.686128166,
"count": 1,
"self": 5.072893774000022,
"children": {
"TorchPOCAOptimizer.update": {
"total": 73.61323439199998,
"count": 37,
"self": 73.61323439199998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5800000028320937e-06,
"count": 1,
"self": 1.5800000028320937e-06
},
"TrainerController._save_models": {
"total": 0.28553470899998956,
"count": 1,
"self": 0.0026438130000201454,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2828908959999694,
"count": 1,
"self": 0.2828908959999694
}
}
}
}
}
}
}