{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5941895246505737,
"min": 0.5480600595474243,
"max": 1.4363446235656738,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18044.34765625,
"min": 16336.57421875,
"max": 43572.94921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.21295934915542603,
"min": -0.11717802286148071,
"max": 0.21295934915542603,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 53.87871551513672,
"min": -28.239904403686523,
"max": 53.87871551513672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04443557187914848,
"min": 0.014049442484974861,
"max": 0.3255608081817627,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.242199897766113,
"min": 3.4983112812042236,
"max": 77.15791320800781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06576379199118708,
"min": 0.06448957914246874,
"max": 0.07257499537704798,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9206930878766191,
"min": 0.48978261294491704,
"max": 1.0251254467567539,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010392378317365433,
"min": 8.66717228355367e-05,
"max": 0.010392378317365433,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14549329644311607,
"min": 0.0011267323968619772,
"max": 0.14549329644311607,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.416697527800002e-06,
"min": 7.416697527800002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010383376538920002,
"min": 0.00010383376538920002,
"max": 0.0035071553309482992,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247220000000001,
"min": 0.10247220000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346108000000002,
"min": 1.3886848,
"max": 2.5690516999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002569727800000001,
"min": 0.0002569727800000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035976189200000014,
"min": 0.0035976189200000014,
"max": 0.11692826482999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014802946709096432,
"min": 0.014572839252650738,
"max": 0.43386051058769226,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2072412520647049,
"min": 0.20401975512504578,
"max": 3.0370235443115234,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 637.1458333333334,
"min": 607.3846153846154,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30583.0,
"min": 15984.0,
"max": 33045.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8209999663134416,
"min": -1.0000000521540642,
"max": 0.8209999663134416,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 39.4079983830452,
"min": -30.62960172444582,
"max": 40.40959845483303,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8209999663134416,
"min": -1.0000000521540642,
"max": 0.8209999663134416,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 39.4079983830452,
"min": -30.62960172444582,
"max": 40.40959845483303,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09728331148410992,
"min": 0.09507865159125337,
"max": 8.683953858911991,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.669598951237276,
"min": 4.3632136271335185,
"max": 138.94326174259186,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673993358",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ../config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673995247"
},
"total": 1888.9351935659997,
"count": 1,
"self": 0.4802609710004617,
"children": {
"run_training.setup": {
"total": 0.10400923899942427,
"count": 1,
"self": 0.10400923899942427
},
"TrainerController.start_learning": {
"total": 1888.3509233559998,
"count": 1,
"self": 1.1591932069741233,
"children": {
"TrainerController._reset_env": {
"total": 6.152855768000336,
"count": 1,
"self": 6.152855768000336
},
"TrainerController.advance": {
"total": 1880.9508373740255,
"count": 63224,
"self": 1.232069293027962,
"children": {
"env_step": {
"total": 1223.7384940120137,
"count": 63224,
"self": 1121.656141230089,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.37029868298214,
"count": 63224,
"self": 4.120659484970929,
"children": {
"TorchPolicy.evaluate": {
"total": 97.24963919801121,
"count": 62557,
"self": 33.031433941073374,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.21820525693784,
"count": 62557,
"self": 64.21820525693784
}
}
}
}
},
"workers": {
"total": 0.7120540989426445,
"count": 63224,
"self": 0.0,
"children": {
"worker_root": {
"total": 1884.187928652068,
"count": 63224,
"is_parallel": true,
"self": 856.9269363432495,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017319059998044395,
"count": 1,
"is_parallel": true,
"self": 0.0006245020003916579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011074039994127816,
"count": 8,
"is_parallel": true,
"self": 0.0011074039994127816
}
}
},
"UnityEnvironment.step": {
"total": 0.04720417000044108,
"count": 1,
"is_parallel": true,
"self": 0.0005116740012454102,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042078299975401023,
"count": 1,
"is_parallel": true,
"self": 0.00042078299975401023
},
"communicator.exchange": {
"total": 0.044650721999460075,
"count": 1,
"is_parallel": true,
"self": 0.044650721999460075
},
"steps_from_proto": {
"total": 0.0016209909999815864,
"count": 1,
"is_parallel": true,
"self": 0.00039764299799571745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001223348001985869,
"count": 8,
"is_parallel": true,
"self": 0.001223348001985869
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1027.2609923088185,
"count": 63223,
"is_parallel": true,
"self": 27.164332126661066,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.630105264046506,
"count": 63223,
"is_parallel": true,
"self": 21.630105264046506
},
"communicator.exchange": {
"total": 881.8671764701276,
"count": 63223,
"is_parallel": true,
"self": 881.8671764701276
},
"steps_from_proto": {
"total": 96.59937844798333,
"count": 63223,
"is_parallel": true,
"self": 21.048200341686425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.5511781062969,
"count": 505784,
"is_parallel": true,
"self": 75.5511781062969
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.9802740689838,
"count": 63224,
"self": 2.201803113942333,
"children": {
"process_trajectory": {
"total": 141.4174219040433,
"count": 63224,
"self": 141.23944675104394,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17797515299935185,
"count": 2,
"self": 0.17797515299935185
}
}
},
"_update_policy": {
"total": 512.3610490509982,
"count": 443,
"self": 192.5261031070968,
"children": {
"TorchPPOOptimizer.update": {
"total": 319.8349459439014,
"count": 22776,
"self": 319.8349459439014
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0670000847312622e-06,
"count": 1,
"self": 1.0670000847312622e-06
},
"TrainerController._save_models": {
"total": 0.08803593999982695,
"count": 1,
"self": 0.0013523620000341907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08668357799979276,
"count": 1,
"self": 0.08668357799979276
}
}
}
}
}
}
}