{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9076099991798401,
"min": 0.9076099991798401,
"max": 2.8623406887054443,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8785.6650390625,
"min": 8785.6650390625,
"max": 29344.716796875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.108038902282715,
"min": 0.5381110310554504,
"max": 14.108038902282715,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2892.14794921875,
"min": 104.39353942871094,
"max": 2892.14794921875,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07024131641887565,
"min": 0.05859752080954529,
"max": 0.0768773556619691,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3512065820943783,
"min": 0.23439008323818117,
"max": 0.36810750479078563,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.185192534327507,
"min": 0.14188875462038114,
"max": 0.3432107374218164,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9259626716375351,
"min": 0.5675550184815246,
"max": 1.6073153565327325,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0176098982400003e-05,
"min": 1.0176098982400003e-05,
"max": 0.0009891760010824,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.088049491200002e-05,
"min": 5.088049491200002e-05,
"max": 0.0048468800153119995,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.113024000000005e-05,
"min": 8.113024000000005e-05,
"max": 0.00692434024,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00040565120000000027,
"min": 0.00040565120000000027,
"max": 0.0339296912,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.618181818181817,
"min": 3.8181818181818183,
"max": 27.98181818181818,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1519.0,
"min": 168.0,
"max": 1539.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.618181818181817,
"min": 3.8181818181818183,
"max": 27.98181818181818,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1519.0,
"min": 168.0,
"max": 1539.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680431052",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680432199"
},
"total": 1147.5846234949995,
"count": 1,
"self": 0.3831024679989241,
"children": {
"run_training.setup": {
"total": 0.10527087800028312,
"count": 1,
"self": 0.10527087800028312
},
"TrainerController.start_learning": {
"total": 1147.0962501490003,
"count": 1,
"self": 1.3202531629540317,
"children": {
"TrainerController._reset_env": {
"total": 3.4788207030001104,
"count": 1,
"self": 3.4788207030001104
},
"TrainerController.advance": {
"total": 1142.1677606410462,
"count": 45477,
"self": 0.6545610391285663,
"children": {
"env_step": {
"total": 1141.5131996019177,
"count": 45477,
"self": 831.4854457857655,
"children": {
"SubprocessEnvManager._take_step": {
"total": 309.3698402861228,
"count": 45477,
"self": 4.258624800032521,
"children": {
"TorchPolicy.evaluate": {
"total": 305.11121548609026,
"count": 45477,
"self": 305.11121548609026
}
}
},
"workers": {
"total": 0.6579135300294183,
"count": 45477,
"self": 0.0,
"children": {
"worker_root": {
"total": 1143.2282684159159,
"count": 45477,
"is_parallel": true,
"self": 532.258437057948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001822249999804626,
"count": 1,
"is_parallel": true,
"self": 0.0005693119987881801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001252938001016446,
"count": 10,
"is_parallel": true,
"self": 0.001252938001016446
}
}
},
"UnityEnvironment.step": {
"total": 0.047262445999876945,
"count": 1,
"is_parallel": true,
"self": 0.0003612219989008736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031873800026005483,
"count": 1,
"is_parallel": true,
"self": 0.00031873800026005483
},
"communicator.exchange": {
"total": 0.045516626000335236,
"count": 1,
"is_parallel": true,
"self": 0.045516626000335236
},
"steps_from_proto": {
"total": 0.0010658600003807805,
"count": 1,
"is_parallel": true,
"self": 0.00023944299937284086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008264170010079397,
"count": 10,
"is_parallel": true,
"self": 0.0008264170010079397
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 610.9698313579679,
"count": 45476,
"is_parallel": true,
"self": 24.534858940867252,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.40537451399996,
"count": 45476,
"is_parallel": true,
"self": 13.40537451399996
},
"communicator.exchange": {
"total": 492.1718367020362,
"count": 45476,
"is_parallel": true,
"self": 492.1718367020362
},
"steps_from_proto": {
"total": 80.85776120106448,
"count": 45476,
"is_parallel": true,
"self": 15.987068659212127,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.87069254185235,
"count": 454760,
"is_parallel": true,
"self": 64.87069254185235
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011574400014069397,
"count": 1,
"self": 0.00011574400014069397,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1133.4342856535195,
"count": 1034286,
"is_parallel": true,
"self": 25.187041478791798,
"children": {
"process_trajectory": {
"total": 627.3703568027217,
"count": 1034286,
"is_parallel": true,
"self": 624.8085960577223,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5617607449994466,
"count": 10,
"is_parallel": true,
"self": 2.5617607449994466
}
}
},
"_update_policy": {
"total": 480.87688737200597,
"count": 227,
"is_parallel": true,
"self": 172.03355409504002,
"children": {
"TorchPPOOptimizer.update": {
"total": 308.84333327696595,
"count": 11574,
"is_parallel": true,
"self": 308.84333327696595
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12929989799977193,
"count": 1,
"self": 0.000816151999970316,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1284837459998016,
"count": 1,
"self": 0.1284837459998016
}
}
}
}
}
}
}