{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8437650799751282,
"min": 0.8437650799751282,
"max": 2.850801706314087,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8056.26904296875,
"min": 8056.26904296875,
"max": 29195.060546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.734084129333496,
"min": 0.2894265353679657,
"max": 12.734084129333496,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2483.146484375,
"min": 56.14875030517578,
"max": 2575.970703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06282898773336937,
"min": 0.06282898773336937,
"max": 0.07244137072998189,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25131595093347747,
"min": 0.25131595093347747,
"max": 0.3612315757964811,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17994225956499577,
"min": 0.1571069920205,
"max": 0.2871464216241649,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7197690382599831,
"min": 0.628427968082,
"max": 1.4357321081208247,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.15909090909091,
"min": 4.181818181818182,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1107.0,
"min": 184.0,
"max": 1358.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.15909090909091,
"min": 4.181818181818182,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1107.0,
"min": 184.0,
"max": 1358.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673450988",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673451490"
},
"total": 501.8924421559999,
"count": 1,
"self": 0.44263200800003233,
"children": {
"run_training.setup": {
"total": 0.1207376209999893,
"count": 1,
"self": 0.1207376209999893
},
"TrainerController.start_learning": {
"total": 501.3290725269999,
"count": 1,
"self": 0.6142388870017612,
"children": {
"TrainerController._reset_env": {
"total": 7.408316877000061,
"count": 1,
"self": 7.408316877000061
},
"TrainerController.advance": {
"total": 493.1627470219979,
"count": 18200,
"self": 0.31925075798881153,
"children": {
"env_step": {
"total": 492.8434962640091,
"count": 18200,
"self": 323.4304437470172,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.0826684720048,
"count": 18200,
"self": 1.60508131101858,
"children": {
"TorchPolicy.evaluate": {
"total": 167.47758716098622,
"count": 18200,
"self": 38.588993757994444,
"children": {
"TorchPolicy.sample_actions": {
"total": 128.88859340299177,
"count": 18200,
"self": 128.88859340299177
}
}
}
}
},
"workers": {
"total": 0.33038404498711316,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 499.8304511590153,
"count": 18200,
"is_parallel": true,
"self": 239.83043893100137,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006758191000017177,
"count": 1,
"is_parallel": true,
"self": 0.0038679420001699327,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028902489998472447,
"count": 10,
"is_parallel": true,
"self": 0.0028902489998472447
}
}
},
"UnityEnvironment.step": {
"total": 0.04950251500008562,
"count": 1,
"is_parallel": true,
"self": 0.000585810000075071,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00027105000003757596,
"count": 1,
"is_parallel": true,
"self": 0.00027105000003757596
},
"communicator.exchange": {
"total": 0.04644726399999399,
"count": 1,
"is_parallel": true,
"self": 0.04644726399999399
},
"steps_from_proto": {
"total": 0.002198390999978983,
"count": 1,
"is_parallel": true,
"self": 0.000509059000137313,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00168933199984167,
"count": 10,
"is_parallel": true,
"self": 0.00168933199984167
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 260.00001222801393,
"count": 18199,
"is_parallel": true,
"self": 9.939086147019339,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.0913738959872035,
"count": 18199,
"is_parallel": true,
"self": 6.0913738959872035
},
"communicator.exchange": {
"total": 206.0390089070064,
"count": 18199,
"is_parallel": true,
"self": 206.0390089070064
},
"steps_from_proto": {
"total": 37.93054327800098,
"count": 18199,
"is_parallel": true,
"self": 8.431064895011787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.499478382989196,
"count": 181990,
"is_parallel": true,
"self": 29.499478382989196
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.60999999379419e-05,
"count": 1,
"self": 5.60999999379419e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 489.26491889003955,
"count": 395232,
"is_parallel": true,
"self": 11.765129435071799,
"children": {
"process_trajectory": {
"total": 280.3610658749684,
"count": 395232,
"is_parallel": true,
"self": 279.57787335996807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7831925150003372,
"count": 4,
"is_parallel": true,
"self": 0.7831925150003372
}
}
},
"_update_policy": {
"total": 197.13872357999935,
"count": 90,
"is_parallel": true,
"self": 49.323276884996176,
"children": {
"TorchPPOOptimizer.update": {
"total": 147.81544669500317,
"count": 4587,
"is_parallel": true,
"self": 147.81544669500317
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.143713641000204,
"count": 1,
"self": 0.0009734450002270023,
"children": {
"RLTrainer._checkpoint": {
"total": 0.142740195999977,
"count": 1,
"self": 0.142740195999977
}
}
}
}
}
}
}