First Pyramids
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43753165006637573,
"min": 0.41744309663772583,
"max": 1.3924976587295532,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12978.9384765625,
"min": 12509.9345703125,
"max": 35363.59375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4682513177394867,
"min": -0.10345122218132019,
"max": 0.48039451241493225,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 125.4913558959961,
"min": -24.828292846679688,
"max": 128.7457275390625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.17259953916072845,
"min": -0.17259953916072845,
"max": 0.41910725831985474,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -46.256675720214844,
"min": -46.256675720214844,
"max": 79.21127319335938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0685792949221864,
"min": 0.06272136655198189,
"max": 0.07298683998682191,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.028689423832796,
"min": 0.3488453659372827,
"max": 1.0930936766574937,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018928470855937917,
"min": 0.0005183195692592675,
"max": 0.018928470855937917,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.28392706283906877,
"min": 0.004664876123333407,
"max": 0.28392706283906877,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.459997513366668e-06,
"min": 7.459997513366668e-06,
"max": 0.00029451648182784,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011189996270050003,
"min": 0.00011189996270050003,
"max": 0.0032591127136291998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248663333333334,
"min": 0.10248663333333334,
"max": 0.19817216,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372995,
"min": 0.9908608000000001,
"max": 2.4863708000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002584146700000001,
"min": 0.0002584146700000001,
"max": 0.009817398784,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003876220050000001,
"min": 0.003876220050000001,
"max": 0.10866844291999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010363002307713032,
"min": 0.010363002307713032,
"max": 0.37137073278427124,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15544503927230835,
"min": 0.1496373414993286,
"max": 1.856853723526001,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 384.68831168831167,
"min": 384.68831168831167,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29621.0,
"min": 15984.0,
"max": 35042.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4876025445186174,
"min": -1.0000000521540642,
"max": 1.5380239822467168,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 116.03299847245216,
"min": -31.998401656746864,
"max": 118.06099843233824,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4876025445186174,
"min": -1.0000000521540642,
"max": 1.5380239822467168,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 116.03299847245216,
"min": -31.998401656746864,
"max": 118.06099843233824,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0413381181114789,
"min": 0.0413381181114789,
"max": 5.869423013180494,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2243732126953546,
"min": 3.2243732126953546,
"max": 93.91076821088791,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1656870710",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --resume --env=./trained-envs-executables/windows/Pyramids --run-id=First Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1656872850"
},
"total": 2140.177695962,
"count": 1,
"self": 0.4764530160000504,
"children": {
"run_training.setup": {
"total": 0.042247865000717866,
"count": 1,
"self": 0.042247865000717866
},
"TrainerController.start_learning": {
"total": 2139.6589950809994,
"count": 1,
"self": 1.4866335599444938,
"children": {
"TrainerController._reset_env": {
"total": 6.446935760000088,
"count": 1,
"self": 6.446935760000088
},
"TrainerController.advance": {
"total": 2131.620224490055,
"count": 63321,
"self": 1.5641444211287308,
"children": {
"env_step": {
"total": 1406.6707451807952,
"count": 63321,
"self": 1294.4871764377604,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.41859657908026,
"count": 63321,
"self": 4.920382213256744,
"children": {
"TorchPolicy.evaluate": {
"total": 106.49821436582351,
"count": 62194,
"self": 36.555733122810125,
"children": {
"TorchPolicy.sample_actions": {
"total": 69.94248124301339,
"count": 62194,
"self": 69.94248124301339
}
}
}
}
},
"workers": {
"total": 0.7649721639545533,
"count": 63321,
"self": 0.0,
"children": {
"worker_root": {
"total": 2135.542165260008,
"count": 63321,
"is_parallel": true,
"self": 948.1408273369761,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023908030007078196,
"count": 1,
"is_parallel": true,
"self": 0.0008587640004407149,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015320390002671047,
"count": 8,
"is_parallel": true,
"self": 0.0015320390002671047
}
}
},
"UnityEnvironment.step": {
"total": 0.04604570399988006,
"count": 1,
"is_parallel": true,
"self": 0.0005683689996658359,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005049339997640345,
"count": 1,
"is_parallel": true,
"self": 0.0005049339997640345
},
"communicator.exchange": {
"total": 0.04321112700017693,
"count": 1,
"is_parallel": true,
"self": 0.04321112700017693
},
"steps_from_proto": {
"total": 0.0017612740002732608,
"count": 1,
"is_parallel": true,
"self": 0.00045008699999016244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013111870002830983,
"count": 8,
"is_parallel": true,
"self": 0.0013111870002830983
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1187.4013379230319,
"count": 63320,
"is_parallel": true,
"self": 28.375593604262576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.08925435607125,
"count": 63320,
"is_parallel": true,
"self": 26.08925435607125
},
"communicator.exchange": {
"total": 1033.1733856198352,
"count": 63320,
"is_parallel": true,
"self": 1033.1733856198352
},
"steps_from_proto": {
"total": 99.76310434286279,
"count": 63320,
"is_parallel": true,
"self": 24.67261493382466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.09048940903813,
"count": 506560,
"is_parallel": true,
"self": 75.09048940903813
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 723.3853348881312,
"count": 63321,
"self": 2.6235942361554407,
"children": {
"process_trajectory": {
"total": 173.39631310197728,
"count": 63321,
"self": 173.18222002597759,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21409307599969907,
"count": 2,
"self": 0.21409307599969907
}
}
},
"_update_policy": {
"total": 547.3654275499985,
"count": 445,
"self": 213.5311880339341,
"children": {
"TorchPPOOptimizer.update": {
"total": 333.8342395160644,
"count": 22689,
"self": 333.8342395160644
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2740001693600789e-06,
"count": 1,
"self": 1.2740001693600789e-06
},
"TrainerController._save_models": {
"total": 0.10519999699954496,
"count": 1,
"self": 0.002664577999894391,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10253541899965057,
"count": 1,
"self": 0.10253541899965057
}
}
}
}
}
}
}
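The JSON above is the raw run log that ML-Agents saved for this training run: a set of gauges (per-summary statistics such as policy entropy, losses, the learning-rate/epsilon/beta schedules, episode length and cumulative reward), a metadata block recording the exact mlagents-learn invocation and library versions, and a hierarchical timer profile accounting for the roughly 2,140 seconds of wall-clock time. Below is a minimal sketch of how such a log could be inspected; the file path is an assumption (ML-Agents typically writes this log as run_logs/timers.json inside the results folder), not something stated above.

```python
import json

# Path is an assumption: ML-Agents typically writes this log to
# results/<run-id>/run_logs/timers.json.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores the most recent value plus min/max/count over all summary writes.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The remainder of the file is a timer tree: every node records total/self seconds
# and a call count, with nested children. Walking it shows where time was spent.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.1f}s "
          f"self={node.get('self', 0.0):.1f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(root.get("name", "root"), root)
```

Reading the log this way makes the headline numbers easy to pick out: Pyramids.Environment.CumulativeReward.mean ends at about 1.49 after roughly 990k steps, and the timer walk shows that the single largest chunk of the ~2,140 s run (about 1,033 s) sits under communicator.exchange, i.e. stepping the Unity environment itself.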