{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.006709337234497,
"min": 1.006709337234497,
"max": 2.864173173904419,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9612.060546875,
"min": 9612.060546875,
"max": 29395.009765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.996334075927734,
"min": 0.3177822232246399,
"max": 12.014837265014648,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2339.28515625,
"min": 61.64975357055664,
"max": 2451.02685546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06984036025824025,
"min": 0.06108423119248805,
"max": 0.07615923770394076,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.279361441032961,
"min": 0.2545921234864805,
"max": 0.36159105115503365,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20112433028863927,
"min": 0.10848450459660872,
"max": 0.28268590554887174,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8044973211545571,
"min": 0.43393801838643487,
"max": 1.3106496345763115,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.818181818181817,
"min": 3.25,
"max": 23.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1048.0,
"min": 143.0,
"max": 1294.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.818181818181817,
"min": 3.25,
"max": 23.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1048.0,
"min": 143.0,
"max": 1294.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701100453",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701100952"
},
"total": 499.5031696479998,
"count": 1,
"self": 0.7822285069996724,
"children": {
"run_training.setup": {
"total": 0.060827821999964726,
"count": 1,
"self": 0.060827821999964726
},
"TrainerController.start_learning": {
"total": 498.6601133190002,
"count": 1,
"self": 0.6405420679907365,
"children": {
"TrainerController._reset_env": {
"total": 4.440519054999868,
"count": 1,
"self": 4.440519054999868
},
"TrainerController.advance": {
"total": 493.4364566880097,
"count": 18199,
"self": 0.32158226798560463,
"children": {
"env_step": {
"total": 493.1148744200241,
"count": 18199,
"self": 337.9749713290694,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.81815013598884,
"count": 18199,
"self": 1.556145688006609,
"children": {
"TorchPolicy.evaluate": {
"total": 153.26200444798224,
"count": 18199,
"self": 153.26200444798224
}
}
},
"workers": {
"total": 0.3217529549658593,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 497.2482991600061,
"count": 18199,
"is_parallel": true,
"self": 243.81574547099103,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00596211700008098,
"count": 1,
"is_parallel": true,
"self": 0.0038308880000386125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021312290000423673,
"count": 10,
"is_parallel": true,
"self": 0.0021312290000423673
}
}
},
"UnityEnvironment.step": {
"total": 0.03775803200005612,
"count": 1,
"is_parallel": true,
"self": 0.0034955150001678703,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004095140000117681,
"count": 1,
"is_parallel": true,
"self": 0.0004095140000117681
},
"communicator.exchange": {
"total": 0.03184841500001312,
"count": 1,
"is_parallel": true,
"self": 0.03184841500001312
},
"steps_from_proto": {
"total": 0.002004587999863361,
"count": 1,
"is_parallel": true,
"self": 0.00040410599990536866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016004819999579922,
"count": 10,
"is_parallel": true,
"self": 0.0016004819999579922
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 253.4325536890151,
"count": 18198,
"is_parallel": true,
"self": 10.633412111014877,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.474986393018526,
"count": 18198,
"is_parallel": true,
"self": 5.474986393018526
},
"communicator.exchange": {
"total": 203.12878586297415,
"count": 18198,
"is_parallel": true,
"self": 203.12878586297415
},
"steps_from_proto": {
"total": 34.19536932200754,
"count": 18198,
"is_parallel": true,
"self": 6.537842645088858,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.65752667691868,
"count": 181980,
"is_parallel": true,
"self": 27.65752667691868
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001274889996238926,
"count": 1,
"self": 0.0001274889996238926,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 489.12730825092103,
"count": 528934,
"is_parallel": true,
"self": 11.284875172738566,
"children": {
"process_trajectory": {
"total": 276.04658962818166,
"count": 528934,
"is_parallel": true,
"self": 275.2287628381821,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8178267899995717,
"count": 4,
"is_parallel": true,
"self": 0.8178267899995717
}
}
},
"_update_policy": {
"total": 201.7958434500008,
"count": 90,
"is_parallel": true,
"self": 68.51273517798927,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.28310827201153,
"count": 4587,
"is_parallel": true,
"self": 133.28310827201153
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14246801900026185,
"count": 1,
"self": 0.0012432310004442115,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14122478799981764,
"count": 1,
"self": 0.14122478799981764
}
}
}
}
}
}
}