{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6450259685516357,
"min": 0.6410074830055237,
"max": 1.5322463512420654,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19216.61328125,
"min": 19127.6640625,
"max": 46482.2265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.22885049879550934,
"min": -0.11402715742588043,
"max": 0.24656739830970764,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 57.44147491455078,
"min": -27.024436950683594,
"max": 62.62812042236328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 3.2871368603082374e-05,
"min": -0.007853555493056774,
"max": 0.1551777571439743,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.008250713348388672,
"min": -2.0026566982269287,
"max": 36.777130126953125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06732209784522031,
"min": 0.06732209784522031,
"max": 0.07421415945623422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0098314676783047,
"min": 0.49244108266710984,
"max": 1.0562169675715265,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008960935817109808,
"min": 3.054372120455332e-05,
"max": 0.008960935817109808,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13441403725664713,
"min": 0.0003970683756591932,
"max": 0.13441403725664713,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.2378564190973333e-05,
"min": 1.2378564190973333e-05,
"max": 0.0004919177159021714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001856784628646,
"min": 0.0001856784628646,
"max": 0.005421213815757299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247569333333334,
"min": 0.10247569333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371354000000002,
"min": 1.3691136000000002,
"max": 2.4842427000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000257321764,
"min": 0.000257321764,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00385982646,
"min": 0.00385982646,
"max": 0.10845584573,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0024546144995838404,
"min": 0.0024546144995838404,
"max": 0.07493139803409576,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0368192158639431,
"min": 0.0354711078107357,
"max": 0.5245198011398315,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 687.0666666666667,
"min": 660.75,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30918.0,
"min": 15984.0,
"max": 32179.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7630272400988773,
"min": -1.0000000521540642,
"max": 0.9755363293330778,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 33.573198564350605,
"min": -32.000001668930054,
"max": 42.92359849065542,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7630272400988773,
"min": -1.0000000521540642,
"max": 0.9755363293330778,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 33.573198564350605,
"min": -32.000001668930054,
"max": 42.92359849065542,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.019590981404193866,
"min": 0.019441925362116308,
"max": 4.322437385097146,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 0.8620031817845302,
"min": 0.8554447159331175,
"max": 69.15899816155434,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690026003",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690028497"
},
"total": 2493.795741709,
"count": 1,
"self": 0.4908949479995499,
"children": {
"run_training.setup": {
"total": 0.03342672200051311,
"count": 1,
"self": 0.03342672200051311
},
"TrainerController.start_learning": {
"total": 2493.271420039,
"count": 1,
"self": 1.538290237860565,
"children": {
"TrainerController._reset_env": {
"total": 5.724531943000329,
"count": 1,
"self": 5.724531943000329
},
"TrainerController.advance": {
"total": 2485.913467538141,
"count": 63278,
"self": 1.5527100442959636,
"children": {
"env_step": {
"total": 1451.5818918200293,
"count": 63278,
"self": 1333.24871613504,
"children": {
"SubprocessEnvManager._take_step": {
"total": 117.38603611204053,
"count": 63278,
"self": 5.157252747841994,
"children": {
"TorchPolicy.evaluate": {
"total": 112.22878336419853,
"count": 62561,
"self": 112.22878336419853
}
}
},
"workers": {
"total": 0.9471395729487995,
"count": 63278,
"self": 0.0,
"children": {
"worker_root": {
"total": 2488.0286356773104,
"count": 63278,
"is_parallel": true,
"self": 1277.8358804564432,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003004457999850274,
"count": 1,
"is_parallel": true,
"self": 0.0008506439990014769,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002153814000848797,
"count": 8,
"is_parallel": true,
"self": 0.002153814000848797
}
}
},
"UnityEnvironment.step": {
"total": 0.053261887000189745,
"count": 1,
"is_parallel": true,
"self": 0.0005864399972779211,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005664620002789889,
"count": 1,
"is_parallel": true,
"self": 0.0005664620002789889
},
"communicator.exchange": {
"total": 0.0501881880009023,
"count": 1,
"is_parallel": true,
"self": 0.0501881880009023
},
"steps_from_proto": {
"total": 0.0019207970017305342,
"count": 1,
"is_parallel": true,
"self": 0.0003889070012519369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015318900004785974,
"count": 8,
"is_parallel": true,
"self": 0.0015318900004785974
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1210.1927552208672,
"count": 63277,
"is_parallel": true,
"self": 35.84065835011279,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.53304013212255,
"count": 63277,
"is_parallel": true,
"self": 25.53304013212255
},
"communicator.exchange": {
"total": 1037.242527930919,
"count": 63277,
"is_parallel": true,
"self": 1037.242527930919
},
"steps_from_proto": {
"total": 111.57652880771275,
"count": 63277,
"is_parallel": true,
"self": 22.968402393804354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.6081264139084,
"count": 506216,
"is_parallel": true,
"self": 88.6081264139084
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1032.7788656738157,
"count": 63278,
"self": 2.669245314771615,
"children": {
"process_trajectory": {
"total": 118.07764391304227,
"count": 63278,
"self": 117.79136581504281,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2862780979994568,
"count": 2,
"self": 0.2862780979994568
}
}
},
"_update_policy": {
"total": 912.0319764460019,
"count": 439,
"self": 596.0920037379474,
"children": {
"TorchPPOOptimizer.update": {
"total": 315.9399727080545,
"count": 38105,
"self": 315.9399727080545
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0249987099086866e-06,
"count": 1,
"self": 1.0249987099086866e-06
},
"TrainerController._save_models": {
"total": 0.09512929499942402,
"count": 1,
"self": 0.0012831679996452294,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09384612699977879,
"count": 1,
"self": 0.09384612699977879
}
}
}
}
}
}
}