{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9593857526779175,
"min": 0.9498782753944397,
"max": 2.8788156509399414,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9191.875,
"min": 9191.875,
"max": 29513.6171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.205334663391113,
"min": 0.4354381263256073,
"max": 13.205334663391113,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2575.040283203125,
"min": 84.4749984741211,
"max": 2679.617919921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06946192981444718,
"min": 0.06214538616135114,
"max": 0.07482993320302864,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27784771925778873,
"min": 0.25064444603848124,
"max": 0.37349947194382493,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20614630400257952,
"min": 0.12997776126790356,
"max": 0.27957061283728657,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8245852160103181,
"min": 0.5199110450716142,
"max": 1.3978530641864328,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.15909090909091,
"min": 3.8636363636363638,
"max": 26.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1151.0,
"min": 170.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.15909090909091,
"min": 3.8636363636363638,
"max": 26.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1151.0,
"min": 170.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693160797",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693161270"
},
"total": 472.99093223499995,
"count": 1,
"self": 0.8328840789999958,
"children": {
"run_training.setup": {
"total": 0.04467707999992854,
"count": 1,
"self": 0.04467707999992854
},
"TrainerController.start_learning": {
"total": 472.113371076,
"count": 1,
"self": 0.5802813300031175,
"children": {
"TrainerController._reset_env": {
"total": 4.347673300999986,
"count": 1,
"self": 4.347673300999986
},
"TrainerController.advance": {
"total": 466.9393407089967,
"count": 18217,
"self": 0.2721774340018328,
"children": {
"env_step": {
"total": 466.6671632749949,
"count": 18217,
"self": 338.92875821598113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.458840268004,
"count": 18217,
"self": 1.9718767910046608,
"children": {
"TorchPolicy.evaluate": {
"total": 125.48696347699934,
"count": 18217,
"self": 125.48696347699934
}
}
},
"workers": {
"total": 0.2795647910097614,
"count": 18217,
"self": 0.0,
"children": {
"worker_root": {
"total": 470.39041632499607,
"count": 18217,
"is_parallel": true,
"self": 222.5170435269904,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007894754000062676,
"count": 1,
"is_parallel": true,
"self": 0.00582183499977873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002072919000283946,
"count": 10,
"is_parallel": true,
"self": 0.002072919000283946
}
}
},
"UnityEnvironment.step": {
"total": 0.05299805499998911,
"count": 1,
"is_parallel": true,
"self": 0.0005379639999318897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034517700009928376,
"count": 1,
"is_parallel": true,
"self": 0.00034517700009928376
},
"communicator.exchange": {
"total": 0.04978126500009239,
"count": 1,
"is_parallel": true,
"self": 0.04978126500009239
},
"steps_from_proto": {
"total": 0.002333648999865545,
"count": 1,
"is_parallel": true,
"self": 0.0004309789999297209,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019026699999358243,
"count": 10,
"is_parallel": true,
"self": 0.0019026699999358243
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 247.87337279800568,
"count": 18216,
"is_parallel": true,
"self": 10.628170196975816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.389983229010568,
"count": 18216,
"is_parallel": true,
"self": 5.389983229010568
},
"communicator.exchange": {
"total": 195.77810993700814,
"count": 18216,
"is_parallel": true,
"self": 195.77810993700814
},
"steps_from_proto": {
"total": 36.07710943501115,
"count": 18216,
"is_parallel": true,
"self": 6.5762219680214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.500887466989752,
"count": 182160,
"is_parallel": true,
"self": 29.500887466989752
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015786800008754653,
"count": 1,
"self": 0.00015786800008754653,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 463.3658425759447,
"count": 439918,
"is_parallel": true,
"self": 9.521029598048926,
"children": {
"process_trajectory": {
"total": 251.54221240289485,
"count": 439918,
"is_parallel": true,
"self": 249.9808395878947,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5613728150001407,
"count": 4,
"is_parallel": true,
"self": 1.5613728150001407
}
}
},
"_update_policy": {
"total": 202.30260057500095,
"count": 90,
"is_parallel": true,
"self": 83.72647889700238,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.57612167799857,
"count": 4587,
"is_parallel": true,
"self": 118.57612167799857
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24591786800010595,
"count": 1,
"self": 0.0011620260002018767,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24475584199990408,
"count": 1,
"self": 0.24475584199990408
}
}
}
}
}
}
}