{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4371541738510132,
"min": 0.4371541738510132,
"max": 1.447709321975708,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13177.5751953125,
"min": 13177.5751953125,
"max": 43917.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989934.0,
"min": 29952.0,
"max": 989934.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989934.0,
"min": 29952.0,
"max": 989934.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3904217481613159,
"min": -0.11730703711509705,
"max": 0.4008304178714752,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 104.63302612304688,
"min": -28.153688430786133,
"max": 107.02172088623047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007560729049146175,
"min": 0.007560729049146175,
"max": 0.2903762757778168,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.026275396347046,
"min": 2.026275396347046,
"max": 69.98068237304688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06818356998943878,
"min": 0.06476537082620658,
"max": 0.0744347085179912,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.954569979852143,
"min": 0.5210429596259384,
"max": 1.0629619741036247,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012574989494515604,
"min": 0.0002667785270070738,
"max": 0.013609561172779647,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17604985292321845,
"min": 0.0032013423240848855,
"max": 0.20414341759169471,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.392054678871431e-06,
"min": 7.392054678871431e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010348876550420004,
"min": 0.00010348876550420004,
"max": 0.003490864636378499,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246398571428572,
"min": 0.10246398571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344958,
"min": 1.3886848,
"max": 2.5275128000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025615217285714293,
"min": 0.00025615217285714293,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035861304200000012,
"min": 0.0035861304200000012,
"max": 0.11637578784999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01095340121537447,
"min": 0.010662509128451347,
"max": 0.45290905237197876,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15334761142730713,
"min": 0.14927512407302856,
"max": 3.170363426208496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 445.030303030303,
"min": 445.030303030303,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29372.0,
"min": 15984.0,
"max": 34032.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4640363398374934,
"min": -1.0000000521540642,
"max": 1.4640363398374934,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 96.62639842927456,
"min": -31.996801674365997,
"max": 96.62639842927456,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4640363398374934,
"min": -1.0000000521540642,
"max": 1.4640363398374934,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 96.62639842927456,
"min": -31.996801674365997,
"max": 96.62639842927456,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05050653808222816,
"min": 0.04970219661312961,
"max": 9.314574934542179,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3334315134270582,
"min": 3.2879879637039267,
"max": 149.03319895267487,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685868074",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685870278"
},
"total": 2204.057395255,
"count": 1,
"self": 0.48653779999995095,
"children": {
"run_training.setup": {
"total": 0.04233597600000394,
"count": 1,
"self": 0.04233597600000394
},
"TrainerController.start_learning": {
"total": 2203.528521479,
"count": 1,
"self": 1.3498361630158797,
"children": {
"TrainerController._reset_env": {
"total": 3.9726607259999582,
"count": 1,
"self": 3.9726607259999582
},
"TrainerController.advance": {
"total": 2198.1121879879843,
"count": 63476,
"self": 1.28326596901843,
"children": {
"env_step": {
"total": 1572.7254380020097,
"count": 63476,
"self": 1469.1050416180094,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.84468383901151,
"count": 63476,
"self": 4.667240634010511,
"children": {
"TorchPolicy.evaluate": {
"total": 98.177443205001,
"count": 62558,
"self": 98.177443205001
}
}
},
"workers": {
"total": 0.7757125449888349,
"count": 63476,
"self": 0.0,
"children": {
"worker_root": {
"total": 2198.730133192004,
"count": 63476,
"is_parallel": true,
"self": 839.3724559900386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005159686000013153,
"count": 1,
"is_parallel": true,
"self": 0.003764543999864145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013951420001490078,
"count": 8,
"is_parallel": true,
"self": 0.0013951420001490078
}
}
},
"UnityEnvironment.step": {
"total": 0.054857802000015,
"count": 1,
"is_parallel": true,
"self": 0.0005523090001133824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005269149999662659,
"count": 1,
"is_parallel": true,
"self": 0.0005269149999662659
},
"communicator.exchange": {
"total": 0.05176073899997391,
"count": 1,
"is_parallel": true,
"self": 0.05176073899997391
},
"steps_from_proto": {
"total": 0.002017838999961441,
"count": 1,
"is_parallel": true,
"self": 0.0004486429999133179,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001569196000048123,
"count": 8,
"is_parallel": true,
"self": 0.001569196000048123
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1359.3576772019653,
"count": 63475,
"is_parallel": true,
"self": 31.780383560045266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.730661344931377,
"count": 63475,
"is_parallel": true,
"self": 22.730661344931377
},
"communicator.exchange": {
"total": 1208.2888097170128,
"count": 63475,
"is_parallel": true,
"self": 1208.2888097170128
},
"steps_from_proto": {
"total": 96.55782257997589,
"count": 63475,
"is_parallel": true,
"self": 19.49321567102595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.06460690894994,
"count": 507800,
"is_parallel": true,
"self": 77.06460690894994
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 624.1034840169561,
"count": 63476,
"self": 2.4450470079584647,
"children": {
"process_trajectory": {
"total": 104.43148108800216,
"count": 63476,
"self": 104.16413178700242,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2673493009997401,
"count": 2,
"self": 0.2673493009997401
}
}
},
"_update_policy": {
"total": 517.2269559209956,
"count": 449,
"self": 335.0815561780138,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.1453997429818,
"count": 22797,
"self": 182.1453997429818
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.179998414765578e-07,
"count": 1,
"self": 9.179998414765578e-07
},
"TrainerController._save_models": {
"total": 0.09383568400016884,
"count": 1,
"self": 0.0014840770004411752,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09235160699972766,
"count": 1,
"self": 0.09235160699972766
}
}
}
}
}
}
}