{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4870789647102356,
"min": 0.4870789647102356,
"max": 1.4195480346679688,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14659.12890625,
"min": 14659.12890625,
"max": 43063.41015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989989.0,
"min": 29952.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989989.0,
"min": 29952.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5176927447319031,
"min": -0.0922793596982956,
"max": 0.5184658169746399,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.98934936523438,
"min": -22.23932647705078,
"max": 145.98934936523438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.23424449563026428,
"min": -0.10310628265142441,
"max": 0.5476201176643372,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 66.05694580078125,
"min": -27.632484436035156,
"max": 129.7859649658203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07138944576856578,
"min": 0.06623307030864918,
"max": 0.07363210674137304,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0708416865284867,
"min": 0.5154247471896113,
"max": 1.0708416865284867,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.025924301149401193,
"min": 0.00048368832003252573,
"max": 0.025924301149401193,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3888645172410179,
"min": 0.0067716364804553605,
"max": 0.3888645172410179,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4302375232866625e-06,
"min": 7.4302375232866625e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011145356284929994,
"min": 0.00011145356284929994,
"max": 0.003382611272463,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247671333333332,
"min": 0.10247671333333332,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371506999999998,
"min": 1.3886848,
"max": 2.527537,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002574236619999999,
"min": 0.0002574236619999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038613549299999985,
"min": 0.0038613549299999985,
"max": 0.1127809463,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011298011057078838,
"min": 0.011298011057078838,
"max": 0.5662930607795715,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1694701611995697,
"min": 0.1589425504207611,
"max": 3.9640514850616455,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.5353535353535,
"min": 329.5353535353535,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32624.0,
"min": 15984.0,
"max": 33195.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6098504904845747,
"min": -1.0000000521540642,
"max": 1.6098504904845747,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 159.3751985579729,
"min": -29.973401620984077,
"max": 159.3751985579729,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6098504904845747,
"min": -1.0000000521540642,
"max": 1.6098504904845747,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 159.3751985579729,
"min": -29.973401620984077,
"max": 159.3751985579729,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03818667876578848,
"min": 0.03818667876578848,
"max": 11.520122053101659,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.78048119781306,
"min": 3.412972043537593,
"max": 184.32195284962654,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673423485",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673425488"
},
"total": 2002.6279427630002,
"count": 1,
"self": 0.4345252070004335,
"children": {
"run_training.setup": {
"total": 0.10943450499985374,
"count": 1,
"self": 0.10943450499985374
},
"TrainerController.start_learning": {
"total": 2002.083983051,
"count": 1,
"self": 1.3102604150390107,
"children": {
"TrainerController._reset_env": {
"total": 6.681162574000155,
"count": 1,
"self": 6.681162574000155
},
"TrainerController.advance": {
"total": 1994.0007494099614,
"count": 63687,
"self": 1.3130575658635735,
"children": {
"env_step": {
"total": 1333.1638626910428,
"count": 63687,
"self": 1227.2990334950237,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.0450409640473,
"count": 63687,
"self": 4.232242303962039,
"children": {
"TorchPolicy.evaluate": {
"total": 100.81279866008526,
"count": 62578,
"self": 33.58714770214351,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.22565095794175,
"count": 62578,
"self": 67.22565095794175
}
}
}
}
},
"workers": {
"total": 0.8197882319718701,
"count": 63687,
"self": 0.0,
"children": {
"worker_root": {
"total": 1998.207548517047,
"count": 63687,
"is_parallel": true,
"self": 871.1482170631157,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017958550001822005,
"count": 1,
"is_parallel": true,
"self": 0.000588390000302752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012074649998794484,
"count": 8,
"is_parallel": true,
"self": 0.0012074649998794484
}
}
},
"UnityEnvironment.step": {
"total": 0.047325813000043127,
"count": 1,
"is_parallel": true,
"self": 0.0005578819998390827,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004922860000533547,
"count": 1,
"is_parallel": true,
"self": 0.0004922860000533547
},
"communicator.exchange": {
"total": 0.044572167000069385,
"count": 1,
"is_parallel": true,
"self": 0.044572167000069385
},
"steps_from_proto": {
"total": 0.0017034780000813043,
"count": 1,
"is_parallel": true,
"self": 0.0004456959998151433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001257782000266161,
"count": 8,
"is_parallel": true,
"self": 0.001257782000266161
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1127.0593314539312,
"count": 63686,
"is_parallel": true,
"self": 28.189231335917384,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.340607755940937,
"count": 63686,
"is_parallel": true,
"self": 22.340607755940937
},
"communicator.exchange": {
"total": 984.2186785150534,
"count": 63686,
"is_parallel": true,
"self": 984.2186785150534
},
"steps_from_proto": {
"total": 92.31081384701952,
"count": 63686,
"is_parallel": true,
"self": 21.85290177514844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.45791207187108,
"count": 509488,
"is_parallel": true,
"self": 70.45791207187108
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 659.523829153055,
"count": 63687,
"self": 2.4490298810940203,
"children": {
"process_trajectory": {
"total": 142.77542156296226,
"count": 63687,
"self": 142.58075008296214,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19467148000012457,
"count": 2,
"self": 0.19467148000012457
}
}
},
"_update_policy": {
"total": 514.2993777089987,
"count": 449,
"self": 198.24061496599757,
"children": {
"TorchPPOOptimizer.update": {
"total": 316.0587627430011,
"count": 22791,
"self": 316.0587627430011
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.969996088126209e-07,
"count": 1,
"self": 8.969996088126209e-07
},
"TrainerController._save_models": {
"total": 0.0918097549997583,
"count": 1,
"self": 0.0015095280000423372,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09030022699971596,
"count": 1,
"self": 0.09030022699971596
}
}
}
}
}
}
}