{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.927349865436554,
"min": 0.9151868224143982,
"max": 2.863839864730835,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8864.537109375,
"min": 8864.537109375,
"max": 29486.095703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.010130882263184,
"min": 0.49093008041381836,
"max": 13.010130882263184,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2536.9755859375,
"min": 95.24043273925781,
"max": 2622.6455078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0690803570540476,
"min": 0.061265367948163105,
"max": 0.07398634593061848,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2763214282161904,
"min": 0.24506147179265242,
"max": 0.36873720638401464,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19299197598707443,
"min": 0.1512683230502458,
"max": 0.28043705184085693,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7719679039482977,
"min": 0.6050732922009832,
"max": 1.4021852592042847,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.25,
"min": 4.431818181818182,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1111.0,
"min": 195.0,
"max": 1415.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.25,
"min": 4.431818181818182,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1111.0,
"min": 195.0,
"max": 1415.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709275957",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709276644"
},
"total": 686.939086101,
"count": 1,
"self": 1.021834963999936,
"children": {
"run_training.setup": {
"total": 0.10327775700000075,
"count": 1,
"self": 0.10327775700000075
},
"TrainerController.start_learning": {
"total": 685.8139733800001,
"count": 1,
"self": 1.0340636869964328,
"children": {
"TrainerController._reset_env": {
"total": 4.215449196999998,
"count": 1,
"self": 4.215449196999998
},
"TrainerController.advance": {
"total": 680.3739648590035,
"count": 18201,
"self": 0.5272880260201873,
"children": {
"env_step": {
"total": 679.8466768329833,
"count": 18201,
"self": 520.9432400999856,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.40148032100308,
"count": 18201,
"self": 3.285807582997222,
"children": {
"TorchPolicy.evaluate": {
"total": 155.11567273800586,
"count": 18201,
"self": 155.11567273800586
}
}
},
"workers": {
"total": 0.5019564119946835,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 683.3118406430038,
"count": 18201,
"is_parallel": true,
"self": 318.2426573730108,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009221320999984073,
"count": 1,
"is_parallel": true,
"self": 0.007309900000109337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019114209998747356,
"count": 10,
"is_parallel": true,
"self": 0.0019114209998747356
}
}
},
"UnityEnvironment.step": {
"total": 0.0491117639999743,
"count": 1,
"is_parallel": true,
"self": 0.0008139540000229317,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005235360000028777,
"count": 1,
"is_parallel": true,
"self": 0.0005235360000028777
},
"communicator.exchange": {
"total": 0.04540969799995764,
"count": 1,
"is_parallel": true,
"self": 0.04540969799995764
},
"steps_from_proto": {
"total": 0.0023645759999908478,
"count": 1,
"is_parallel": true,
"self": 0.0004862650000063695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018783109999844783,
"count": 10,
"is_parallel": true,
"self": 0.0018783109999844783
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 365.06918326999306,
"count": 18200,
"is_parallel": true,
"self": 17.572809522002046,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.831282990992065,
"count": 18200,
"is_parallel": true,
"self": 8.831282990992065
},
"communicator.exchange": {
"total": 287.3153845230023,
"count": 18200,
"is_parallel": true,
"self": 287.3153845230023
},
"steps_from_proto": {
"total": 51.34970623399664,
"count": 18200,
"is_parallel": true,
"self": 10.534750338998208,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.814955894998434,
"count": 182000,
"is_parallel": true,
"self": 40.814955894998434
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00028278900003897434,
"count": 1,
"self": 0.00028278900003897434,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 671.9448321300404,
"count": 880367,
"is_parallel": true,
"self": 21.76388904399687,
"children": {
"process_trajectory": {
"total": 365.1655518200436,
"count": 880367,
"is_parallel": true,
"self": 363.95258202704366,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2129697929999566,
"count": 4,
"is_parallel": true,
"self": 1.2129697929999566
}
}
},
"_update_policy": {
"total": 285.0153912659998,
"count": 90,
"is_parallel": true,
"self": 70.10613862299982,
"children": {
"TorchPPOOptimizer.update": {
"total": 214.909252643,
"count": 4587,
"is_parallel": true,
"self": 214.909252643
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.19021284800010108,
"count": 1,
"self": 0.0028131100002610765,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18739973799984,
"count": 1,
"self": 0.18739973799984
}
}
}
}
}
}
}