{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8055532574653625,
"min": 0.7974890470504761,
"max": 2.8554625511169434,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8276.25390625,
"min": 7855.34912109375,
"max": 29242.79296875,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.26636028289795,
"min": 0.2261212319135666,
"max": 13.26636028289795,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2706.33740234375,
"min": 43.86751937866211,
"max": 2706.33740234375,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07250593102573617,
"min": 0.06361447321062716,
"max": 0.07608299297092291,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.36252965512868085,
"min": 0.25911760008688467,
"max": 0.3722585622625261,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17176396706525016,
"min": 0.11877474184125188,
"max": 0.2690399873444774,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8588198353262508,
"min": 0.4750989673650075,
"max": 1.3289753072986414,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333331e-06,
"min": 5.288098237333331e-06,
"max": 0.00029458800180399996,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6440491186666655e-05,
"min": 2.6440491186666655e-05,
"max": 0.0014234400255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.527272727272727,
"min": 3.3181818181818183,
"max": 26.527272727272727,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1459.0,
"min": 146.0,
"max": 1459.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.527272727272727,
"min": 3.3181818181818183,
"max": 26.527272727272727,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1459.0,
"min": 146.0,
"max": 1459.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702880232",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/config.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702880891"
},
"total": 658.639849972,
"count": 1,
"self": 0.44870032399990123,
"children": {
"run_training.setup": {
"total": 0.053578917999971054,
"count": 1,
"self": 0.053578917999971054
},
"TrainerController.start_learning": {
"total": 658.1375707300001,
"count": 1,
"self": 0.814569468999025,
"children": {
"TrainerController._reset_env": {
"total": 3.2650227579999864,
"count": 1,
"self": 3.2650227579999864
},
"TrainerController.advance": {
"total": 653.9684580330011,
"count": 27331,
"self": 0.3804264520073275,
"children": {
"env_step": {
"total": 653.5880315809937,
"count": 27331,
"self": 429.71914928598073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 223.45796053300853,
"count": 27331,
"self": 2.178946096978734,
"children": {
"TorchPolicy.evaluate": {
"total": 221.2790144360298,
"count": 27331,
"self": 221.2790144360298
}
}
},
"workers": {
"total": 0.4109217620044774,
"count": 27331,
"self": 0.0,
"children": {
"worker_root": {
"total": 656.4618278449974,
"count": 27331,
"is_parallel": true,
"self": 323.43727517699733,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003953400000000329,
"count": 1,
"is_parallel": true,
"self": 0.0027398440001888957,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012135559998114331,
"count": 10,
"is_parallel": true,
"self": 0.0012135559998114331
}
}
},
"UnityEnvironment.step": {
"total": 0.03986100899999201,
"count": 1,
"is_parallel": true,
"self": 0.0007084290000420879,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003714399999807938,
"count": 1,
"is_parallel": true,
"self": 0.0003714399999807938
},
"communicator.exchange": {
"total": 0.036644274999957815,
"count": 1,
"is_parallel": true,
"self": 0.036644274999957815
},
"steps_from_proto": {
"total": 0.002136865000011312,
"count": 1,
"is_parallel": true,
"self": 0.00041163899982166186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017252260001896502,
"count": 10,
"is_parallel": true,
"self": 0.0017252260001896502
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 333.02455266800007,
"count": 27330,
"is_parallel": true,
"self": 15.749770333011611,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.792997897999612,
"count": 27330,
"is_parallel": true,
"self": 7.792997897999612
},
"communicator.exchange": {
"total": 260.0691906399935,
"count": 27330,
"is_parallel": true,
"self": 260.0691906399935
},
"steps_from_proto": {
"total": 49.41259379699534,
"count": 27330,
"is_parallel": true,
"self": 8.951614119018245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.46097967797709,
"count": 273300,
"is_parallel": true,
"self": 40.46097967797709
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0005022390000704036,
"count": 1,
"self": 0.0005022390000704036,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 645.7611521119019,
"count": 981309,
"is_parallel": true,
"self": 20.24979291794807,
"children": {
"process_trajectory": {
"total": 358.4520205749543,
"count": 981309,
"is_parallel": true,
"self": 357.2673775479541,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1846430270002202,
"count": 6,
"is_parallel": true,
"self": 1.1846430270002202
}
}
},
"_update_policy": {
"total": 267.0593386189995,
"count": 136,
"is_parallel": true,
"self": 82.45793512799833,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.60140349100118,
"count": 6930,
"is_parallel": true,
"self": 184.60140349100118
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08901823099995454,
"count": 1,
"self": 0.001042293999944377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08797593700001016,
"count": 1,
"self": 0.08797593700001016
}
}
}
}
}
}
}