{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0946364402770996,
"min": 1.0928155183792114,
"max": 2.2952566146850586,
"count": 16
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10571.9990234375,
"min": 10571.9990234375,
"max": 21262.677734375,
"count": 16
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 49936.0,
"max": 199984.0,
"count": 16
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 49936.0,
"max": 199984.0,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.663004875183105,
"min": 4.613474369049072,
"max": 11.663004875183105,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2390.916015625,
"min": 415.2126770019531,
"max": 2390.916015625,
"count": 16
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 16
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 4378.0,
"max": 10945.0,
"count": 16
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06690535414617282,
"min": 0.0629664167288366,
"max": 0.08164551496441823,
"count": 16
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3345267707308641,
"min": 0.16329102992883646,
"max": 0.37080980874780634,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20293266107054317,
"min": 0.20219451240172573,
"max": 0.2816235404826847,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0146633053527159,
"min": 0.4996212343776635,
"max": 1.321066310008367,
"count": 16
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.620097459999994e-06,
"min": 7.620097459999994e-06,
"max": 0.00022707002430999998,
"count": 16
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.810048729999997e-05,
"min": 3.810048729999997e-05,
"max": 0.0010116001628000001,
"count": 16
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10254,
"min": 0.10254,
"max": 0.17569,
"count": 16
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5127,
"min": 0.35138,
"max": 0.8371999999999999,
"count": 16
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001367459999999999,
"min": 0.0001367459999999999,
"max": 0.0037869310000000008,
"count": 16
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006837299999999995,
"min": 0.0006837299999999995,
"max": 0.01687628,
"count": 16
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.29090909090909,
"min": 11.363636363636363,
"max": 23.29090909090909,
"count": 16
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1281.0,
"min": 250.0,
"max": 1281.0,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.29090909090909,
"min": 11.363636363636363,
"max": 23.29090909090909,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1281.0,
"min": 250.0,
"max": 1281.0,
"count": 16
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678982457",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678982815"
},
"total": 357.5105299379993,
"count": 1,
"self": 0.4229530849988805,
"children": {
"run_training.setup": {
"total": 0.10437413999989076,
"count": 1,
"self": 0.10437413999989076
},
"TrainerController.start_learning": {
"total": 356.98320271300054,
"count": 1,
"self": 0.42532609600129945,
"children": {
"TrainerController._reset_env": {
"total": 5.815293917000417,
"count": 1,
"self": 5.815293917000417
},
"TrainerController.advance": {
"total": 350.609135067999,
"count": 14076,
"self": 0.2169891020112118,
"children": {
"env_step": {
"total": 350.39214596598777,
"count": 14076,
"self": 254.35593229293045,
"children": {
"SubprocessEnvManager._take_step": {
"total": 95.8235493300399,
"count": 14076,
"self": 1.5876107260501158,
"children": {
"TorchPolicy.evaluate": {
"total": 94.23593860398978,
"count": 14076,
"self": 94.23593860398978
}
}
},
"workers": {
"total": 0.21266434301742265,
"count": 14076,
"self": 0.0,
"children": {
"worker_root": {
"total": 355.8562420690605,
"count": 14076,
"is_parallel": true,
"self": 167.9133230540965,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019935740001528757,
"count": 1,
"is_parallel": true,
"self": 0.0006973040008233511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012962699993295246,
"count": 10,
"is_parallel": true,
"self": 0.0012962699993295246
}
}
},
"UnityEnvironment.step": {
"total": 0.04526850700040086,
"count": 1,
"is_parallel": true,
"self": 0.0004340719997344422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028055500024493085,
"count": 1,
"is_parallel": true,
"self": 0.00028055500024493085
},
"communicator.exchange": {
"total": 0.04302806400028203,
"count": 1,
"is_parallel": true,
"self": 0.04302806400028203
},
"steps_from_proto": {
"total": 0.0015258160001394572,
"count": 1,
"is_parallel": true,
"self": 0.0003153769985146937,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012104390016247635,
"count": 10,
"is_parallel": true,
"self": 0.0012104390016247635
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 187.942919014964,
"count": 14075,
"is_parallel": true,
"self": 7.318242229825955,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.949933673029591,
"count": 14075,
"is_parallel": true,
"self": 3.949933673029591
},
"communicator.exchange": {
"total": 152.8487242990468,
"count": 14075,
"is_parallel": true,
"self": 152.8487242990468
},
"steps_from_proto": {
"total": 23.826018813061637,
"count": 14075,
"is_parallel": true,
"self": 4.675390604853419,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.150628208208218,
"count": 140750,
"is_parallel": true,
"self": 19.150628208208218
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.511099996307166e-05,
"count": 1,
"self": 4.511099996307166e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 348.0652949210498,
"count": 311576,
"is_parallel": true,
"self": 7.311375475193017,
"children": {
"process_trajectory": {
"total": 193.2930408948596,
"count": 311576,
"is_parallel": true,
"self": 192.29040123285995,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0026396619996376,
"count": 4,
"is_parallel": true,
"self": 1.0026396619996376
}
}
},
"_update_policy": {
"total": 147.46087855099722,
"count": 70,
"is_parallel": true,
"self": 56.242486161010675,
"children": {
"TorchPPOOptimizer.update": {
"total": 91.21839238998655,
"count": 3567,
"is_parallel": true,
"self": 91.21839238998655
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13340252099987993,
"count": 1,
"self": 0.0010547809997660806,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13234774000011384,
"count": 1,
"self": 0.13234774000011384
}
}
}
}
}
}
}