{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.0700390338897705,
"min": 1.0700390338897705,
"max": 1.4682573080062866,
"count": 2
},
"Pyramids.Policy.Entropy.sum": {
"value": 32049.80859375,
"min": 32049.80859375,
"max": 44541.0546875,
"count": 2
},
"Pyramids.Step.mean": {
"value": 59888.0,
"min": 29952.0,
"max": 59888.0,
"count": 2
},
"Pyramids.Step.sum": {
"value": 59888.0,
"min": 29952.0,
"max": 59888.0,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03291097283363342,
"min": -0.03291097283363342,
"max": 0.10380222648382187,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.931544780731201,
"min": -7.931544780731201,
"max": 24.60112762451172,
"count": 2
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.49802446365356445,
"min": 0.3091415464878082,
"max": 0.49802446365356445,
"count": 2
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 120.02389526367188,
"min": 73.26654815673828,
"max": 120.02389526367188,
"count": 2
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06751970424618535,
"min": 0.06751970424618535,
"max": 0.07114582863061501,
"count": 2
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6076773382156682,
"min": 0.49802080041430513,
"max": 0.6076773382156682,
"count": 2
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0020629774291683815,
"min": 0.0020629774291683815,
"max": 0.004938054214438025,
"count": 2
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.018566796862515434,
"min": 0.018566796862515434,
"max": 0.034566379501066176,
"count": 2
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.150896505259259e-05,
"min": 7.150896505259259e-05,
"max": 0.00021917716979809524,
"count": 2
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006435806854733333,
"min": 0.0006435806854733333,
"max": 0.0015342401885866668,
"count": 2
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12383629629629632,
"min": 0.12383629629629632,
"max": 0.1730590476190476,
"count": 2
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1145266666666669,
"min": 1.1145266666666669,
"max": 1.2114133333333332,
"count": 2
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0023912460000000005,
"min": 0.0023912460000000005,
"max": 0.007308598857142857,
"count": 2
},
"Pyramids.Policy.Beta.sum": {
"value": 0.021521214000000004,
"min": 0.021521214000000004,
"max": 0.051160192,
"count": 2
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.17579102421779083,
"min": 0.17579102421779083,
"max": 0.4865021553633762,
"count": 2
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 1.5821192179601176,
"min": 1.5821192179601176,
"max": 3.4055150875436335,
"count": 2
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.48593344143560446,
"min": 0.48593344143560446,
"max": 0.7601708495264571,
"count": 2
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 4.37340097292044,
"min": 4.37340097292044,
"max": 5.3211959466852,
"count": 2
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 983.7272727272727,
"min": 983.7272727272727,
"max": 999.0,
"count": 2
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32463.0,
"min": 15984.0,
"max": 32463.0,
"count": 2
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8634485349510655,
"min": -1.0000000521540642,
"max": -0.8634485349510655,
"count": 2
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.493801653385162,
"min": -28.493801653385162,
"max": -16.000000834465027,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8634485349510655,
"min": -1.0000000521540642,
"max": -0.8634485349510655,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.493801653385162,
"min": -28.493801653385162,
"max": -16.000000834465027,
"count": 2
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 5.174535974860191,
"min": 5.174535974860191,
"max": 7.066105825826526,
"count": 2
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 170.75968717038631,
"min": 113.05769321322441,
"max": 170.75968717038631,
"count": 2
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692966038",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Pyramids.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692966262"
},
"total": 224.27391621100014,
"count": 1,
"self": 0.6315550570002415,
"children": {
"run_training.setup": {
"total": 0.08380827600012708,
"count": 1,
"self": 0.08380827600012708
},
"TrainerController.start_learning": {
"total": 223.55855287799977,
"count": 1,
"self": 0.13295064800104228,
"children": {
"TrainerController._reset_env": {
"total": 1.920688992000123,
"count": 1,
"self": 1.920688992000123
},
"TrainerController.advance": {
"total": 221.3474071219989,
"count": 3771,
"self": 0.1512690069821474,
"children": {
"env_step": {
"total": 115.23695566300967,
"count": 3771,
"self": 105.65874966700358,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9.493931704998886,
"count": 3771,
"self": 0.5221308799814324,
"children": {
"TorchPolicy.evaluate": {
"total": 8.971800825017453,
"count": 3768,
"self": 8.971800825017453
}
}
},
"workers": {
"total": 0.08427429100720474,
"count": 3771,
"self": 0.0,
"children": {
"worker_root": {
"total": 222.83126691800135,
"count": 3771,
"is_parallel": true,
"self": 127.8539943030014,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032852160002221353,
"count": 1,
"is_parallel": true,
"self": 0.0009156420001090737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023695740001130616,
"count": 8,
"is_parallel": true,
"self": 0.0023695740001130616
}
}
},
"UnityEnvironment.step": {
"total": 0.08681690100002015,
"count": 1,
"is_parallel": true,
"self": 0.0007335120003517659,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006127110000306857,
"count": 1,
"is_parallel": true,
"self": 0.0006127110000306857
},
"communicator.exchange": {
"total": 0.08257545699962066,
"count": 1,
"is_parallel": true,
"self": 0.08257545699962066
},
"steps_from_proto": {
"total": 0.0028952210000170453,
"count": 1,
"is_parallel": true,
"self": 0.0005566630011344387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023385579988826066,
"count": 8,
"is_parallel": true,
"self": 0.0023385579988826066
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 94.97727261499995,
"count": 3770,
"is_parallel": true,
"self": 2.8577311559938607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.614566963998186,
"count": 3770,
"is_parallel": true,
"self": 1.614566963998186
},
"communicator.exchange": {
"total": 81.64266818400392,
"count": 3770,
"is_parallel": true,
"self": 81.64266818400392
},
"steps_from_proto": {
"total": 8.862306311003977,
"count": 3770,
"is_parallel": true,
"self": 1.9190092660101072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.943297044993869,
"count": 30160,
"is_parallel": true,
"self": 6.943297044993869
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 105.9591824520071,
"count": 3771,
"self": 0.18197155100051532,
"children": {
"process_trajectory": {
"total": 10.691621337007291,
"count": 3771,
"self": 10.691621337007291
},
"_update_policy": {
"total": 95.08558956399929,
"count": 16,
"self": 57.343644607001806,
"children": {
"TorchPPOOptimizer.update": {
"total": 37.741944956997486,
"count": 1356,
"self": 37.741944956997486
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0899998414970469e-06,
"count": 1,
"self": 1.0899998414970469e-06
},
"TrainerController._save_models": {
"total": 0.1575050259998534,
"count": 1,
"self": 0.0022850939999443654,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15521993199990902,
"count": 1,
"self": 0.15521993199990902
}
}
}
}
}
}
}