First Push (commit c1eeb10)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.84099942445755,
"min": 0.84099942445755,
"max": 2.8697006702423096,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8048.3642578125,
"min": 8048.3642578125,
"max": 29451.73828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.819323539733887,
"min": 0.30684253573417664,
"max": 12.819323539733887,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2499.76806640625,
"min": 59.52745056152344,
"max": 2592.494873046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06692133571603193,
"min": 0.06177563469861756,
"max": 0.07365187178020634,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2676853428641277,
"min": 0.24710253879447025,
"max": 0.36500927746893985,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20537949470328348,
"min": 0.09216363705562282,
"max": 0.2643341215802174,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8215179788131339,
"min": 0.3686545482224913,
"max": 1.3216706079010871,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.15909090909091,
"min": 2.75,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1107.0,
"min": 121.0,
"max": 1385.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.15909090909091,
"min": 2.75,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1107.0,
"min": 121.0,
"max": 1385.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679287468",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679287936"
},
"total": 468.042031813,
"count": 1,
"self": 0.4302461139998286,
"children": {
"run_training.setup": {
"total": 0.10232763700003034,
"count": 1,
"self": 0.10232763700003034
},
"TrainerController.start_learning": {
"total": 467.50945806200014,
"count": 1,
"self": 0.5136322209995114,
"children": {
"TrainerController._reset_env": {
"total": 9.301476305000051,
"count": 1,
"self": 9.301476305000051
},
"TrainerController.advance": {
"total": 457.5621709900008,
"count": 18204,
"self": 0.2622146870139659,
"children": {
"env_step": {
"total": 457.29995630298686,
"count": 18204,
"self": 329.4158926009734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.62186527599135,
"count": 18204,
"self": 2.212752491985725,
"children": {
"TorchPolicy.evaluate": {
"total": 125.40911278400563,
"count": 18204,
"self": 125.40911278400563
}
}
},
"workers": {
"total": 0.26219842602210974,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.08130520600287,
"count": 18204,
"is_parallel": true,
"self": 224.41827065599648,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005844182000032561,
"count": 1,
"is_parallel": true,
"self": 0.004375514000003022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014686680000295382,
"count": 10,
"is_parallel": true,
"self": 0.0014686680000295382
}
}
},
"UnityEnvironment.step": {
"total": 0.10426534400005494,
"count": 1,
"is_parallel": true,
"self": 0.000578623000137668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041031800003565877,
"count": 1,
"is_parallel": true,
"self": 0.00041031800003565877
},
"communicator.exchange": {
"total": 0.09832509699992897,
"count": 1,
"is_parallel": true,
"self": 0.09832509699992897
},
"steps_from_proto": {
"total": 0.004951305999952638,
"count": 1,
"is_parallel": true,
"self": 0.0019768619997648784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029744440001877592,
"count": 10,
"is_parallel": true,
"self": 0.0029744440001877592
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 241.6630345500064,
"count": 18203,
"is_parallel": true,
"self": 9.388898416996994,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.2940915740017545,
"count": 18203,
"is_parallel": true,
"self": 5.2940915740017545
},
"communicator.exchange": {
"total": 195.4837719090126,
"count": 18203,
"is_parallel": true,
"self": 195.4837719090126
},
"steps_from_proto": {
"total": 31.496272649995035,
"count": 18203,
"is_parallel": true,
"self": 6.236019759977694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.26025289001734,
"count": 182030,
"is_parallel": true,
"self": 25.26025289001734
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011301799986540573,
"count": 1,
"self": 0.00011301799986540573,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 453.973135810029,
"count": 413925,
"is_parallel": true,
"self": 10.220198568976116,
"children": {
"process_trajectory": {
"total": 252.7678580040532,
"count": 413925,
"is_parallel": true,
"self": 251.89290529005348,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8749527139997326,
"count": 4,
"is_parallel": true,
"self": 0.8749527139997326
}
}
},
"_update_policy": {
"total": 190.9850792369997,
"count": 90,
"is_parallel": true,
"self": 69.59313257600206,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.39194666099763,
"count": 4587,
"is_parallel": true,
"self": 121.39194666099763
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13206552799988458,
"count": 1,
"self": 0.0010672229998363036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13099830500004828,
"count": 1,
"self": 0.13099830500004828
}
}
}
}
}
}
}
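
The JSON above is the metric and timer summary that ML-Agents writes out at the end of a run. As a minimal sketch of how such a file can be inspected offline, assuming it is saved as run_logs/timers.json (the usual location for a run launched with the command line recorded in the metadata; the path and the small print_timers helper are assumptions for illustration, not part of the original upload): the gauges block keeps the latest value plus the min/max of each training metric over the 20 summary writes, and the nested children tree breaks down where the roughly 468 seconds of wall-clock time went.

import json

TIMERS_PATH = "run_logs/timers.json"  # assumed path; adjust to your own run directory

with open(TIMERS_PATH) as f:
    root = json.load(f)

# Each gauge stores the last written value plus its min/max over `count` summary writes.
for name, gauge in root["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} (n={gauge['count']})")

# The timer tree nests under "children"; totals are wall-clock seconds per node.
def print_timers(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.1f}s (count={node.get('count', 0)})")
    for child_label, child in node.get("children", {}).items():
        print_timers(child, child_label, depth + 1)

print_timers(root)

Run against this file, the gauge loop would show, for example, that SnowballTarget.Environment.CumulativeReward.mean finished at about 25.16 after starting from a minimum of 2.75, and the timer walk attributes most of the 468 s to env_step (dominated by communicator.exchange) and to the PPO policy update.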