{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.16138200461864471,
"min": 0.15361294150352478,
"max": 1.3788249492645264,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4805.310546875,
"min": 4598.36767578125,
"max": 41828.03515625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999927.0,
"min": 29899.0,
"max": 2999927.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999927.0,
"min": 29899.0,
"max": 2999927.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7915738821029663,
"min": -0.21575289964675903,
"max": 0.8600931167602539,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 235.09744262695312,
"min": -51.13343811035156,
"max": 263.171142578125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009203523397445679,
"min": -0.010924026370048523,
"max": 0.46224427223205566,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7334463596343994,
"min": -3.1879029273986816,
"max": 109.55189514160156,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06845400046347835,
"min": 0.06262469362437037,
"max": 0.07451761776676222,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.958356006488697,
"min": 0.5216233243673355,
"max": 1.091893672882625,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015977201329107873,
"min": 0.00026986948155374756,
"max": 0.01666934664788524,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22368081860751024,
"min": 0.0035083032601987184,
"max": 0.23337085307039337,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4669495110499997e-06,
"min": 1.4669495110499997e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0537293154699995e-05,
"min": 2.0537293154699995e-05,
"max": 0.004011905262698266,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048895000000001,
"min": 0.10048895000000001,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4068453,
"min": 1.3962282666666668,
"max": 2.7375528999999994,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.8846104999999986e-05,
"min": 5.8846104999999986e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008238454699999998,
"min": 0.0008238454699999998,
"max": 0.13373644316,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0070102340541779995,
"min": 0.0070102340541779995,
"max": 0.6094204783439636,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09814327955245972,
"min": 0.09814327955245972,
"max": 4.26594352722168,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 232.10236220472441,
"min": 208.62585034013605,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29477.0,
"min": 16826.0,
"max": 32483.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.735036491519875,
"min": -0.9997125510126352,
"max": 1.783169102800243,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 218.61459793150425,
"min": -31.990801632404327,
"max": 259.5399991944432,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.735036491519875,
"min": -0.9997125510126352,
"max": 1.783169102800243,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 218.61459793150425,
"min": -31.990801632404327,
"max": 259.5399991944432,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01734163427930085,
"min": 0.01644793654904925,
"max": 11.900502757114523,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.185045919191907,
"min": 2.185045919191907,
"max": 202.30854687094688,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717533265",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717540779"
},
"total": 7513.8324267709995,
"count": 1,
"self": 0.481063818999246,
"children": {
"run_training.setup": {
"total": 0.05127542399986851,
"count": 1,
"self": 0.05127542399986851
},
"TrainerController.start_learning": {
"total": 7513.300087528,
"count": 1,
"self": 4.344441298201673,
"children": {
"TrainerController._reset_env": {
"total": 2.29705445400009,
"count": 1,
"self": 2.29705445400009
},
"TrainerController.advance": {
"total": 7506.574606427799,
"count": 195005,
"self": 4.559219622864475,
"children": {
"env_step": {
"total": 5559.115163401107,
"count": 195005,
"self": 5146.037840407925,
"children": {
"SubprocessEnvManager._take_step": {
"total": 410.4382893919378,
"count": 195005,
"self": 14.614020405244219,
"children": {
"TorchPolicy.evaluate": {
"total": 395.8242689866936,
"count": 187543,
"self": 395.8242689866936
}
}
},
"workers": {
"total": 2.639033601244364,
"count": 195005,
"self": 0.0,
"children": {
"worker_root": {
"total": 7496.428331201989,
"count": 195005,
"is_parallel": true,
"self": 2737.4647671631656,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021945080000023154,
"count": 1,
"is_parallel": true,
"self": 0.0007076510003116709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014868569996906444,
"count": 8,
"is_parallel": true,
"self": 0.0014868569996906444
}
}
},
"UnityEnvironment.step": {
"total": 0.052471475999936956,
"count": 1,
"is_parallel": true,
"self": 0.0007135799999105075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005330520000370598,
"count": 1,
"is_parallel": true,
"self": 0.0005330520000370598
},
"communicator.exchange": {
"total": 0.04945247199998448,
"count": 1,
"is_parallel": true,
"self": 0.04945247199998448
},
"steps_from_proto": {
"total": 0.001772372000004907,
"count": 1,
"is_parallel": true,
"self": 0.00037776999988636817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013946020001185389,
"count": 8,
"is_parallel": true,
"self": 0.0013946020001185389
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4758.963564038823,
"count": 195004,
"is_parallel": true,
"self": 104.25589663243409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.41635028916221,
"count": 195004,
"is_parallel": true,
"self": 74.41635028916221
},
"communicator.exchange": {
"total": 4268.412138126955,
"count": 195004,
"is_parallel": true,
"self": 4268.412138126955
},
"steps_from_proto": {
"total": 311.8791789902714,
"count": 195004,
"is_parallel": true,
"self": 64.4930299291616,
"children": {
"_process_rank_one_or_two_observation": {
"total": 247.3861490611098,
"count": 1560032,
"is_parallel": true,
"self": 247.3861490611098
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1942.900223403828,
"count": 195005,
"self": 8.610558539808153,
"children": {
"process_trajectory": {
"total": 409.861800114026,
"count": 195005,
"self": 409.29116479002755,
"children": {
"RLTrainer._checkpoint": {
"total": 0.570635323998431,
"count": 6,
"self": 0.570635323998431
}
}
},
"_update_policy": {
"total": 1524.427864749994,
"count": 1401,
"self": 900.9794533438289,
"children": {
"TorchPPOOptimizer.update": {
"total": 623.448411406165,
"count": 68388,
"self": 623.448411406165
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0099993232870474e-06,
"count": 1,
"self": 1.0099993232870474e-06
},
"TrainerController._save_models": {
"total": 0.08398433799993654,
"count": 1,
"self": 0.001561474999107304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08242286300082924,
"count": 1,
"self": 0.08242286300082924
}
}
}
}
}
}
}