ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4841180741786957,
"min": 0.4770418703556061,
"max": 1.3938826322555542,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14717.189453125,
"min": 14112.806640625,
"max": 42284.82421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29890.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29890.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5993238687515259,
"min": -0.32549071311950684,
"max": 0.6077452301979065,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.21136474609375,
"min": -77.14129638671875,
"max": 171.3841552734375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003141416935250163,
"min": -0.30730104446411133,
"max": 0.7080830931663513,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.8764553070068359,
"min": -78.0544662475586,
"max": 167.81568908691406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06511705456555707,
"min": 0.06511705456555707,
"max": 0.07418011290014717,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9767558184833559,
"min": 0.5934409032011774,
"max": 1.0611411751015112,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0152793147885758,
"min": 0.0006066122193355063,
"max": 0.029338310884001358,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.229189721828637,
"min": 0.00667273441269057,
"max": 0.44007466326002037,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4769575077133354e-06,
"min": 7.4769575077133354e-06,
"max": 0.0002947608392463875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011215436261570004,
"min": 0.00011215436261570004,
"max": 0.003507596330801299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249228666666667,
"min": 0.10249228666666667,
"max": 0.1982536125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373843,
"min": 1.4779296000000002,
"max": 2.5691987000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002589794380000001,
"min": 0.0002589794380000001,
"max": 0.00982553588875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003884691570000001,
"min": 0.003884691570000001,
"max": 0.11694295013,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017012285068631172,
"min": 0.016789482906460762,
"max": 0.6819204092025757,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.25518426299095154,
"min": 0.23505276441574097,
"max": 5.4553632736206055,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 314.0430107526882,
"min": 314.0430107526882,
"max": 992.1875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29206.0,
"min": 17584.0,
"max": 33322.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6455195550199435,
"min": -0.9303563004359603,
"max": 1.6580505115421196,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.3877990618348,
"min": -29.77140161395073,
"max": 157.51479859650135,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6455195550199435,
"min": -0.9303563004359603,
"max": 1.6580505115421196,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.3877990618348,
"min": -29.77140161395073,
"max": 157.51479859650135,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.054804565043646704,
"min": 0.054804565043646704,
"max": 14.2016558084223,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.0420199840154964,
"min": 5.0420199840154964,
"max": 255.6298045516014,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677941123",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677943415"
},
"total": 2292.505173122,
"count": 1,
"self": 0.5200078720004058,
"children": {
"run_training.setup": {
"total": 0.11890567099999316,
"count": 1,
"self": 0.11890567099999316
},
"TrainerController.start_learning": {
"total": 2291.8662595789997,
"count": 1,
"self": 1.4429241569696387,
"children": {
"TrainerController._reset_env": {
"total": 10.563649079000015,
"count": 1,
"self": 10.563649079000015
},
"TrainerController.advance": {
"total": 2279.7654722620305,
"count": 63741,
"self": 1.5401958020070197,
"children": {
"env_step": {
"total": 1521.5567800880267,
"count": 63741,
"self": 1402.6595376420132,
"children": {
"SubprocessEnvManager._take_step": {
"total": 118.02103697099506,
"count": 63741,
"self": 4.9781059339505305,
"children": {
"TorchPolicy.evaluate": {
"total": 113.04293103704453,
"count": 62562,
"self": 38.39692370605741,
"children": {
"TorchPolicy.sample_actions": {
"total": 74.64600733098712,
"count": 62562,
"self": 74.64600733098712
}
}
}
}
},
"workers": {
"total": 0.8762054750185371,
"count": 63741,
"self": 0.0,
"children": {
"worker_root": {
"total": 2286.7530822410267,
"count": 63741,
"is_parallel": true,
"self": 1005.4881854780306,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007792905000030714,
"count": 1,
"is_parallel": true,
"self": 0.0052833589999750075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002509546000055707,
"count": 8,
"is_parallel": true,
"self": 0.002509546000055707
}
}
},
"UnityEnvironment.step": {
"total": 0.0726149669999927,
"count": 1,
"is_parallel": true,
"self": 0.0005961959999467581,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004638550000208852,
"count": 1,
"is_parallel": true,
"self": 0.0004638550000208852
},
"communicator.exchange": {
"total": 0.06969753800001399,
"count": 1,
"is_parallel": true,
"self": 0.06969753800001399
},
"steps_from_proto": {
"total": 0.0018573780000110673,
"count": 1,
"is_parallel": true,
"self": 0.000504099999943719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013532780000673483,
"count": 8,
"is_parallel": true,
"self": 0.0013532780000673483
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1281.2648967629962,
"count": 63740,
"is_parallel": true,
"self": 32.09705234994772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.780313530040303,
"count": 63740,
"is_parallel": true,
"self": 24.780313530040303
},
"communicator.exchange": {
"total": 1127.1192436039983,
"count": 63740,
"is_parallel": true,
"self": 1127.1192436039983
},
"steps_from_proto": {
"total": 97.26828727901,
"count": 63740,
"is_parallel": true,
"self": 23.4695597550799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.7987275239301,
"count": 509920,
"is_parallel": true,
"self": 73.7987275239301
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 756.6684963719969,
"count": 63741,
"self": 2.6297460490510502,
"children": {
"process_trajectory": {
"total": 169.43247389394645,
"count": 63741,
"self": 169.23914006794638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19333382600007099,
"count": 2,
"self": 0.19333382600007099
}
}
},
"_update_policy": {
"total": 584.6062764289993,
"count": 454,
"self": 226.58819682899542,
"children": {
"TorchPPOOptimizer.update": {
"total": 358.0180796000039,
"count": 22788,
"self": 358.0180796000039
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.590000106778461e-07,
"count": 1,
"self": 8.590000106778461e-07
},
"TrainerController._save_models": {
"total": 0.09421322199978022,
"count": 1,
"self": 0.001479267999457079,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09273395400032314,
"count": 1,
"self": 0.09273395400032314
}
}
}
}
}
}
}
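
The JSON above is the timers.json run log that mlagents-learn writes under run_logs/ at the end of a training run: the "gauges" block holds the run's summary statistics (each entry carries a value plus its min, max, and count over the 33 summary periods), "metadata" records the command line and library versions, and the remaining "total" / "count" / "self" / "children" entries form a timer tree describing where the roughly 2292 seconds of wall-clock time went. Below is a minimal sketch of how such a file can be inspected; it assumes a local copy at run_logs/timers.json, and the walk helper is purely illustrative, not part of ML-Agents.

import json

# Load the run log. The relative path is an assumption; point it at your own copy.
with open("run_logs/timers.json") as f:
    log = json.load(f)

# Each gauge entry carries a value plus its min, max, and count over the run's
# summary periods.
reward = log["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print("Pyramids.Environment.CumulativeReward.mean: {:.3f} "
      "(min {:.3f}, max {:.3f}, {} summaries)".format(
          reward["value"], reward["min"], reward["max"], reward["count"]))

# The rest of the file is a nested timer tree: every node reports total seconds,
# call count, self time (excluding children), and its children. This illustrative
# helper prints each timer with its share of the root total; nodes marked
# is_parallel aggregate worker time, so their shares can overlap.
def walk(name, node, depth=0, root_total=None):
    root_total = root_total if root_total is not None else node["total"]
    share = 100.0 * node["total"] / root_total
    print("{}{}: {:.1f}s ({:.1f}%), count={}".format(
        "  " * depth, name, node["total"], share, node["count"]))
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1, root_total)

walk(log["name"], log)

On this particular run, for example, communicator.exchange under UnityEnvironment.step accounts for about 1127 of the 2292 seconds, so roughly half the wall-clock time was spent stepping the Unity executable, while TorchPPOOptimizer.update took about 358 seconds.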