{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32736504077911377,
"min": 0.30987367033958435,
"max": 1.4152685403823853,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9852.3779296875,
"min": 9187.134765625,
"max": 42933.5859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6005477905273438,
"min": -0.1015888899564743,
"max": 0.6862479448318481,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 168.75393676757812,
"min": -24.482921600341797,
"max": 199.01190185546875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05392308533191681,
"min": -0.013329709880053997,
"max": 0.4041973650455475,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.152386665344238,
"min": -3.5990216732025146,
"max": 97.00736999511719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06834013429207039,
"min": 0.0652287796113847,
"max": 0.07254256601750275,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9567618800889856,
"min": 0.47847769691190023,
"max": 1.0463036657019984,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014976887068422954,
"min": 0.0005003818480421692,
"max": 0.01638432307559664,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20967641895792136,
"min": 0.005003818480421692,
"max": 0.22938052305835296,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.634197455300003e-06,
"min": 7.634197455300003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010687876437420005,
"min": 0.00010687876437420005,
"max": 0.0031442810519064,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025447,
"min": 0.1025447,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356258,
"min": 1.3691136000000002,
"max": 2.4003468,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026421553000000015,
"min": 0.00026421553000000015,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036990174200000024,
"min": 0.0036990174200000024,
"max": 0.10483455064,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01569577120244503,
"min": 0.015494920313358307,
"max": 0.6125134229660034,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21974080801010132,
"min": 0.2169288843870163,
"max": 4.287593841552734,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 327.58762886597935,
"min": 279.13761467889907,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31776.0,
"min": 15984.0,
"max": 32466.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6311628685630475,
"min": -1.0000000521540642,
"max": 1.6849127127365633,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 158.2227982506156,
"min": -32.000001668930054,
"max": 185.34039840102196,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6311628685630475,
"min": -1.0000000521540642,
"max": 1.6849127127365633,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 158.2227982506156,
"min": -32.000001668930054,
"max": 185.34039840102196,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05337154749104604,
"min": 0.046798368955659427,
"max": 12.447560727596283,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.177040106631466,
"min": 4.352248312876327,
"max": 199.16097164154053,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674832692",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_RND_Curiosity --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674834768"
},
"total": 2075.306866171,
"count": 1,
"self": 0.4359613389997321,
"children": {
"run_training.setup": {
"total": 0.10843290800005434,
"count": 1,
"self": 0.10843290800005434
},
"TrainerController.start_learning": {
"total": 2074.7624719240002,
"count": 1,
"self": 1.1662516729884373,
"children": {
"TrainerController._reset_env": {
"total": 6.070172855999999,
"count": 1,
"self": 6.070172855999999
},
"TrainerController.advance": {
"total": 2067.4402378980117,
"count": 63998,
"self": 1.2531364960223073,
"children": {
"env_step": {
"total": 1415.3067397109944,
"count": 63998,
"self": 1312.308863588998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.26085192704807,
"count": 63998,
"self": 4.2079648330602595,
"children": {
"TorchPolicy.evaluate": {
"total": 98.0528870939878,
"count": 62565,
"self": 33.336836701015386,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.71605039297242,
"count": 62565,
"self": 64.71605039297242
}
}
}
}
},
"workers": {
"total": 0.7370241949482761,
"count": 63998,
"self": 0.0,
"children": {
"worker_root": {
"total": 2071.2543027849792,
"count": 63998,
"is_parallel": true,
"self": 855.1600418060191,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001889641000047959,
"count": 1,
"is_parallel": true,
"self": 0.0006550270001071112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012346139999408479,
"count": 8,
"is_parallel": true,
"self": 0.0012346139999408479
}
}
},
"UnityEnvironment.step": {
"total": 0.04382523599997512,
"count": 1,
"is_parallel": true,
"self": 0.00047273100005895685,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005960610000101951,
"count": 1,
"is_parallel": true,
"self": 0.0005960610000101951
},
"communicator.exchange": {
"total": 0.04116753699997844,
"count": 1,
"is_parallel": true,
"self": 0.04116753699997844
},
"steps_from_proto": {
"total": 0.0015889069999275307,
"count": 1,
"is_parallel": true,
"self": 0.0004101499999933367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001178756999934194,
"count": 8,
"is_parallel": true,
"self": 0.001178756999934194
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.0942609789602,
"count": 63997,
"is_parallel": true,
"self": 27.489254769999434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.894354905998966,
"count": 63997,
"is_parallel": true,
"self": 21.894354905998966
},
"communicator.exchange": {
"total": 1076.5815083929797,
"count": 63997,
"is_parallel": true,
"self": 1076.5815083929797
},
"steps_from_proto": {
"total": 90.12914290998185,
"count": 63997,
"is_parallel": true,
"self": 20.912696180001262,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.21644672998059,
"count": 511976,
"is_parallel": true,
"self": 69.21644672998059
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.8803616909948,
"count": 63998,
"self": 2.1824213260115357,
"children": {
"process_trajectory": {
"total": 140.419382325985,
"count": 63998,
"self": 140.23119696698495,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18818535900004463,
"count": 2,
"self": 0.18818535900004463
}
}
},
"_update_policy": {
"total": 508.2785580389983,
"count": 441,
"self": 187.27982716600104,
"children": {
"TorchPPOOptimizer.update": {
"total": 320.99873087299727,
"count": 22806,
"self": 320.99873087299727
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.000000318337698e-07,
"count": 1,
"self": 9.000000318337698e-07
},
"TrainerController._save_models": {
"total": 0.08580859699986831,
"count": 1,
"self": 0.001585890999649564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08422270600021875,
"count": 1,
"self": 0.08422270600021875
}
}
}
}
}
}
}