{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.7075469493865967,
"min": 1.7075469493865967,
"max": 2.8718860149383545,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 17486.98828125,
"min": 17128.03125,
"max": 29442.576171875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.08544635772705,
"min": 0.2940658926963806,
"max": 9.08544635772705,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1853.43115234375,
"min": 57.04878234863281,
"max": 1853.43115234375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06652085697606511,
"min": 0.06085493482148828,
"max": 0.07853022093460073,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.33260428488032556,
"min": 0.24341973928595312,
"max": 0.35824057095377093,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.27105463462717394,
"min": 0.11490086922718795,
"max": 0.2726640262440139,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.3552731731358696,
"min": 0.4596034769087518,
"max": 1.3633201312200696,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.490909090909092,
"min": 3.4318181818181817,
"max": 19.490909090909092,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1072.0,
"min": 151.0,
"max": 1072.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.490909090909092,
"min": 3.4318181818181817,
"max": 19.490909090909092,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1072.0,
"min": 151.0,
"max": 1072.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716115921",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716116234"
},
"total": 313.00819820899994,
"count": 1,
"self": 0.6507097009999825,
"children": {
"run_training.setup": {
"total": 0.07402426900000592,
"count": 1,
"self": 0.07402426900000592
},
"TrainerController.start_learning": {
"total": 312.28346423899995,
"count": 1,
"self": 0.7972583400098756,
"children": {
"TrainerController._reset_env": {
"total": 4.141600967000045,
"count": 1,
"self": 4.141600967000045
},
"TrainerController.advance": {
"total": 307.22816979399,
"count": 9135,
"self": 0.22386033798909466,
"children": {
"env_step": {
"total": 307.0043094560009,
"count": 9135,
"self": 235.53236298799072,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.25388873699774,
"count": 9135,
"self": 1.3071070909990112,
"children": {
"TorchPolicy.evaluate": {
"total": 69.94678164599873,
"count": 9135,
"self": 69.94678164599873
}
}
},
"workers": {
"total": 0.2180577310124363,
"count": 9135,
"self": 0.0,
"children": {
"worker_root": {
"total": 310.8626557329991,
"count": 9135,
"is_parallel": true,
"self": 147.69347845899802,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008673594000015328,
"count": 1,
"is_parallel": true,
"self": 0.0046821349999390804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003991459000076247,
"count": 10,
"is_parallel": true,
"self": 0.003991459000076247
}
}
},
"UnityEnvironment.step": {
"total": 0.04667586600010054,
"count": 1,
"is_parallel": true,
"self": 0.0008145270001023164,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047266499996112543,
"count": 1,
"is_parallel": true,
"self": 0.00047266499996112543
},
"communicator.exchange": {
"total": 0.04294306899998901,
"count": 1,
"is_parallel": true,
"self": 0.04294306899998901
},
"steps_from_proto": {
"total": 0.00244560500004809,
"count": 1,
"is_parallel": true,
"self": 0.00047981900013382983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00196578599991426,
"count": 10,
"is_parallel": true,
"self": 0.00196578599991426
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 163.16917727400107,
"count": 9134,
"is_parallel": true,
"self": 7.721400475030691,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.927587473985227,
"count": 9134,
"is_parallel": true,
"self": 3.927587473985227
},
"communicator.exchange": {
"total": 128.59942833099,
"count": 9134,
"is_parallel": true,
"self": 128.59942833099
},
"steps_from_proto": {
"total": 22.920760993995145,
"count": 9134,
"is_parallel": true,
"self": 4.621585600948606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 18.29917539304654,
"count": 91340,
"is_parallel": true,
"self": 18.29917539304654
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0005160260000138805,
"count": 1,
"self": 0.0005160260000138805,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 303.79662841900847,
"count": 401850,
"is_parallel": true,
"self": 9.484850194998558,
"children": {
"process_trajectory": {
"total": 167.71816147101083,
"count": 401850,
"is_parallel": true,
"self": 167.36281561001078,
"children": {
"RLTrainer._checkpoint": {
"total": 0.35534586100004617,
"count": 2,
"is_parallel": true,
"self": 0.35534586100004617
}
}
},
"_update_policy": {
"total": 126.59361675299908,
"count": 45,
"is_parallel": true,
"self": 33.52767215099789,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.0659446020012,
"count": 2292,
"is_parallel": true,
"self": 93.0659446020012
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11591911200002869,
"count": 1,
"self": 0.0015577140000004874,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1143613980000282,
"count": 1,
"self": 0.1143613980000282
}
}
}
}
}
}
}
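
For reference, a minimal sketch of how one might inspect this log offline, assuming the JSON above has been saved locally; the "run_logs/timers.json" path is an assumption, not something stated in the file itself:

    # Minimal sketch: load the timer/metric log and print a summary of each gauge.
    # The file path below is an assumed location; adjust it to wherever the JSON
    # sits in your checkout of the repository.
    import json

    with open("run_logs/timers.json") as f:
        data = json.load(f)

    # Each gauge stores the latest value plus min/max observed over `count` summary writes.
    for name, gauge in data["gauges"].items():
        print(f"{name}: value={gauge['value']:.4f} "
              f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

    # Wall-clock time spent in training, taken from the root timer node.
    print(f"Total training time: {data['total']:.1f} s")

This only reads the file; it does not modify the run logs or depend on ML-Agents being installed.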