{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.222894549369812,
"min": 1.1957294940948486,
"max": 2.86820387840271,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11703.1005859375,
"min": 11703.1005859375,
"max": 29341.7265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.77051067352295,
"min": 0.3626998960971832,
"max": 10.77051067352295,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2100.24951171875,
"min": 70.36377716064453,
"max": 2165.253662109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07178348389838814,
"min": 0.062435955798005746,
"max": 0.07329488800000855,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28713393559355255,
"min": 0.24974382319202298,
"max": 0.35899226152504726,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20851520704580287,
"min": 0.1271367534908338,
"max": 0.2848715939358169,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8340608281832115,
"min": 0.5085470139633352,
"max": 1.4243579696790845,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.3880973060000045e-06,
"min": 5.3880973060000045e-06,
"max": 0.00019458800270600002,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.1552389224000018e-05,
"min": 2.1552389224000018e-05,
"max": 0.0009234400382800001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.477272727272727,
"min": 3.9545454545454546,
"max": 21.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 945.0,
"min": 174.0,
"max": 1181.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.477272727272727,
"min": 3.9545454545454546,
"max": 21.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 945.0,
"min": 174.0,
"max": 1181.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704866611",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704867101"
},
"total": 489.77684522200025,
"count": 1,
"self": 0.4433606500001588,
"children": {
"run_training.setup": {
"total": 0.07348486699993373,
"count": 1,
"self": 0.07348486699993373
},
"TrainerController.start_learning": {
"total": 489.25999970500015,
"count": 1,
"self": 0.635392456973932,
"children": {
"TrainerController._reset_env": {
"total": 3.245709869999928,
"count": 1,
"self": 3.245709869999928
},
"TrainerController.advance": {
"total": 485.2848102610267,
"count": 18201,
"self": 0.3073347400111288,
"children": {
"env_step": {
"total": 484.97747552101555,
"count": 18201,
"self": 321.36732814397965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.30507186102068,
"count": 18201,
"self": 1.5870296060190867,
"children": {
"TorchPolicy.evaluate": {
"total": 161.7180422550016,
"count": 18201,
"self": 161.7180422550016
}
}
},
"workers": {
"total": 0.3050755160152221,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 487.80972605001284,
"count": 18201,
"is_parallel": true,
"self": 240.0891703279667,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005092842999829372,
"count": 1,
"is_parallel": true,
"self": 0.0037259790001371584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013668639996922138,
"count": 10,
"is_parallel": true,
"self": 0.0013668639996922138
}
}
},
"UnityEnvironment.step": {
"total": 0.04166752800006179,
"count": 1,
"is_parallel": true,
"self": 0.0008131489998959296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004406440000366274,
"count": 1,
"is_parallel": true,
"self": 0.0004406440000366274
},
"communicator.exchange": {
"total": 0.03804670099998475,
"count": 1,
"is_parallel": true,
"self": 0.03804670099998475
},
"steps_from_proto": {
"total": 0.0023670340001444856,
"count": 1,
"is_parallel": true,
"self": 0.0006308840002020588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017361499999424268,
"count": 10,
"is_parallel": true,
"self": 0.0017361499999424268
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 247.72055572204613,
"count": 18200,
"is_parallel": true,
"self": 11.638204057072471,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.908854284994277,
"count": 18200,
"is_parallel": true,
"self": 5.908854284994277
},
"communicator.exchange": {
"total": 192.1602811730029,
"count": 18200,
"is_parallel": true,
"self": 192.1602811730029
},
"steps_from_proto": {
"total": 38.013216206976495,
"count": 18200,
"is_parallel": true,
"self": 7.181475276964193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.8317409300123,
"count": 182000,
"is_parallel": true,
"self": 30.8317409300123
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00018326700001125573,
"count": 1,
"self": 0.00018326700001125573,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 478.93540711298965,
"count": 719943,
"is_parallel": true,
"self": 15.77938000191898,
"children": {
"process_trajectory": {
"total": 266.8500336260706,
"count": 719943,
"is_parallel": true,
"self": 265.96158975507046,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8884438710001632,
"count": 4,
"is_parallel": true,
"self": 0.8884438710001632
}
}
},
"_update_policy": {
"total": 196.30599348500004,
"count": 90,
"is_parallel": true,
"self": 63.336505664001834,
"children": {
"TorchPPOOptimizer.update": {
"total": 132.9694878209982,
"count": 4584,
"is_parallel": true,
"self": 132.9694878209982
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09390384999960588,
"count": 1,
"self": 0.0010446769997543015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09285917299985158,
"count": 1,
"self": 0.09285917299985158
}
}
}
}
}
}
}