{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3605869710445404,
"min": 0.3400476276874542,
"max": 1.458526372909546,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10731.068359375,
"min": 10190.546875,
"max": 44245.85546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2841116786003113,
"min": -0.11605717241764069,
"max": 0.3512202799320221,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 73.86903381347656,
"min": -28.201892852783203,
"max": 91.31727600097656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027897167950868607,
"min": -0.004961548838764429,
"max": 0.3119995594024658,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.253263473510742,
"min": -1.257606029510498,
"max": 75.19189453125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06909028818370731,
"min": 0.06531761152398913,
"max": 0.07433437728988272,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9672640345719024,
"min": 0.5203406410291791,
"max": 1.0356888846246646,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012670259417766927,
"min": 6.385160697513942e-05,
"max": 0.012670259417766927,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17738363184873698,
"min": 0.0008300708906768125,
"max": 0.17738363184873698,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.543661771192856e-06,
"min": 7.543661771192856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010561126479669998,
"min": 0.00010561126479669998,
"max": 0.0031393613535463,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251452142857141,
"min": 0.10251452142857141,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352032999999997,
"min": 1.3886848,
"max": 2.3464537,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002612006907142857,
"min": 0.0002612006907142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036568096699999995,
"min": 0.0036568096699999995,
"max": 0.10467072462999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009585014544427395,
"min": 0.009585014544427395,
"max": 0.3893595337867737,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13419020175933838,
"min": 0.13419020175933838,
"max": 2.7255167961120605,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 521.5357142857143,
"min": 521.5357142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29206.0,
"min": 15984.0,
"max": 32372.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2641142514933432,
"min": -1.0000000521540642,
"max": 1.2641142514933432,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 70.79039808362722,
"min": -31.998401656746864,
"max": 70.79039808362722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2641142514933432,
"min": -1.0000000521540642,
"max": 1.2641142514933432,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 70.79039808362722,
"min": -31.998401656746864,
"max": 70.79039808362722,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.052050619646122086,
"min": 0.052050619646122086,
"max": 7.829729118384421,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.914834700182837,
"min": 2.914834700182837,
"max": 125.27566589415073,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683885667",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683887676"
},
"total": 2009.0701628230004,
"count": 1,
"self": 1.0030176630002643,
"children": {
"run_training.setup": {
"total": 0.056257677000303374,
"count": 1,
"self": 0.056257677000303374
},
"TrainerController.start_learning": {
"total": 2008.0108874829998,
"count": 1,
"self": 1.2968964599372157,
"children": {
"TrainerController._reset_env": {
"total": 4.69990780299986,
"count": 1,
"self": 4.69990780299986
},
"TrainerController.advance": {
"total": 2001.869059152063,
"count": 63293,
"self": 1.3383174540635991,
"children": {
"env_step": {
"total": 1366.1776765859795,
"count": 63293,
"self": 1257.7292131820864,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.65524394289059,
"count": 63293,
"self": 4.7127226299153335,
"children": {
"TorchPolicy.evaluate": {
"total": 102.94252131297526,
"count": 62560,
"self": 102.94252131297526
}
}
},
"workers": {
"total": 0.7932194610025363,
"count": 63293,
"self": 0.0,
"children": {
"worker_root": {
"total": 2003.0740155279987,
"count": 63293,
"is_parallel": true,
"self": 855.2044149290041,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002719808000165358,
"count": 1,
"is_parallel": true,
"self": 0.0007502739999836194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019695340001817385,
"count": 8,
"is_parallel": true,
"self": 0.0019695340001817385
}
}
},
"UnityEnvironment.step": {
"total": 0.04743323899992902,
"count": 1,
"is_parallel": true,
"self": 0.0005395749999479449,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004623479999281699,
"count": 1,
"is_parallel": true,
"self": 0.0004623479999281699
},
"communicator.exchange": {
"total": 0.04468808399997215,
"count": 1,
"is_parallel": true,
"self": 0.04468808399997215
},
"steps_from_proto": {
"total": 0.0017432320000807522,
"count": 1,
"is_parallel": true,
"self": 0.00035821199981000973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013850200002707425,
"count": 8,
"is_parallel": true,
"self": 0.0013850200002707425
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1147.8696005989946,
"count": 63292,
"is_parallel": true,
"self": 31.192967495954235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.526004632028616,
"count": 63292,
"is_parallel": true,
"self": 22.526004632028616
},
"communicator.exchange": {
"total": 998.0063941910448,
"count": 63292,
"is_parallel": true,
"self": 998.0063941910448
},
"steps_from_proto": {
"total": 96.1442342799669,
"count": 63292,
"is_parallel": true,
"self": 19.33867356187511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.80556071809178,
"count": 506336,
"is_parallel": true,
"self": 76.80556071809178
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.35306511202,
"count": 63293,
"self": 2.3382927920633847,
"children": {
"process_trajectory": {
"total": 105.02987556895368,
"count": 63293,
"self": 104.77374332895306,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25613224000062473,
"count": 2,
"self": 0.25613224000062473
}
}
},
"_update_policy": {
"total": 526.9848967510029,
"count": 435,
"self": 338.41633015003026,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.56856660097264,
"count": 22902,
"self": 188.56856660097264
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1310003174003214e-06,
"count": 1,
"self": 1.1310003174003214e-06
},
"TrainerController._save_models": {
"total": 0.14502293699933944,
"count": 1,
"self": 0.0018920349994004937,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14313090199993894,
"count": 1,
"self": 0.14313090199993894
}
}
}
}
}
}
}