{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5548283457756042,
"min": 0.5478066205978394,
"max": 1.343166470527649,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16618.21875,
"min": 16372.84375,
"max": 40746.296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.009701108559966087,
"min": -0.027501562610268593,
"max": 0.006844534073024988,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.3379671573638916,
"min": -6.6278767585754395,
"max": 1.6563771963119507,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.042118024080991745,
"min": 0.038220711052417755,
"max": 0.714160144329071,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 10.150444030761719,
"min": 9.184642791748047,
"max": 169.25595092773438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06937680793537253,
"min": 0.06544931462943242,
"max": 0.07417337220178975,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9712753110952154,
"min": 0.5091262181281466,
"max": 1.0500981600632135,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0006671878213796112,
"min": 2.429345335206178e-05,
"max": 0.012193126144808041,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.009340629499314556,
"min": 0.0003401083469288649,
"max": 0.08535188301365629,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.775904550921427e-06,
"min": 7.775904550921427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010886266371289999,
"min": 0.00010886266371289999,
"max": 0.0032546177151274998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10259193571428572,
"min": 0.10259193571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362871000000001,
"min": 1.3691136000000002,
"max": 2.4844622,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026893437785714284,
"min": 0.00026893437785714284,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037650812899999997,
"min": 0.0037650812899999997,
"max": 0.10850876275,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.043518248945474625,
"min": 0.04106331989169121,
"max": 0.6545859575271606,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.6092554926872253,
"min": 0.5748865008354187,
"max": 4.582101821899414,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 972.2903225806451,
"min": 896.5,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30141.0,
"min": 15984.0,
"max": 32374.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7146581159003319,
"min": -1.0000000521540642,
"max": -0.47576367697029404,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -22.15440159291029,
"min": -32.000001668930054,
"max": -15.700201340019703,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7146581159003319,
"min": -1.0000000521540642,
"max": -0.47576367697029404,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -22.15440159291029,
"min": -32.000001668930054,
"max": -15.700201340019703,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.4264755182554044,
"min": 0.3975419149254308,
"max": 13.446727706119418,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 13.220741065917537,
"min": 11.865816544741392,
"max": 215.1476432979107,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684910582",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --torch-device cuda:0",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684912515"
},
"total": 1932.5416005400002,
"count": 1,
"self": 0.49198436700021375,
"children": {
"run_training.setup": {
"total": 0.06565657700002703,
"count": 1,
"self": 0.06565657700002703
},
"TrainerController.start_learning": {
"total": 1931.983959596,
"count": 1,
"self": 1.264382122010602,
"children": {
"TrainerController._reset_env": {
"total": 4.561444543000107,
"count": 1,
"self": 4.561444543000107
},
"TrainerController.advance": {
"total": 1926.068758600989,
"count": 63289,
"self": 1.2637379550058085,
"children": {
"env_step": {
"total": 1299.3724154320362,
"count": 63289,
"self": 1196.1722497859942,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.44318881602885,
"count": 63289,
"self": 4.566027193024638,
"children": {
"TorchPolicy.evaluate": {
"total": 97.87716162300421,
"count": 62564,
"self": 97.87716162300421
}
}
},
"workers": {
"total": 0.7569768300131727,
"count": 63289,
"self": 0.0,
"children": {
"worker_root": {
"total": 1927.5808488870637,
"count": 63289,
"is_parallel": true,
"self": 837.8230016470454,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025692060000892525,
"count": 1,
"is_parallel": true,
"self": 0.0007157379998261604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001853468000263092,
"count": 8,
"is_parallel": true,
"self": 0.001853468000263092
}
}
},
"UnityEnvironment.step": {
"total": 0.04331913600003645,
"count": 1,
"is_parallel": true,
"self": 0.0005240830000730057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000465892999955031,
"count": 1,
"is_parallel": true,
"self": 0.000465892999955031
},
"communicator.exchange": {
"total": 0.04062580599998,
"count": 1,
"is_parallel": true,
"self": 0.04062580599998
},
"steps_from_proto": {
"total": 0.001703354000028412,
"count": 1,
"is_parallel": true,
"self": 0.00034590500013109704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001357448999897315,
"count": 8,
"is_parallel": true,
"self": 0.001357448999897315
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1089.7578472400182,
"count": 63288,
"is_parallel": true,
"self": 30.82191178993162,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.959418965995837,
"count": 63288,
"is_parallel": true,
"self": 21.959418965995837
},
"communicator.exchange": {
"total": 942.4144531270224,
"count": 63288,
"is_parallel": true,
"self": 942.4144531270224
},
"steps_from_proto": {
"total": 94.56206335706838,
"count": 63288,
"is_parallel": true,
"self": 18.956046673184574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.60601668388381,
"count": 506304,
"is_parallel": true,
"self": 75.60601668388381
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 625.432605213947,
"count": 63289,
"self": 2.3884990449275847,
"children": {
"process_trajectory": {
"total": 102.42527161102225,
"count": 63289,
"self": 102.22635177602206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19891983500019705,
"count": 2,
"self": 0.19891983500019705
}
}
},
"_update_policy": {
"total": 520.6188345579972,
"count": 444,
"self": 339.1423938050168,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.47644075298035,
"count": 22737,
"self": 181.47644075298035
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.589998626324814e-07,
"count": 1,
"self": 9.589998626324814e-07
},
"TrainerController._save_models": {
"total": 0.08937337100041987,
"count": 1,
"self": 0.0013874860005671508,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08798588499985271,
"count": 1,
"self": 0.08798588499985271
}
}
}
}
}
}
}