{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39619481563568115,
"min": 0.39619481563568115,
"max": 1.4689055681228638,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11860.48828125,
"min": 11860.48828125,
"max": 44560.71875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 29993.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 29993.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6354079246520996,
"min": -0.11585243791341782,
"max": 0.7194526195526123,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.82044982910156,
"min": -28.03628921508789,
"max": 207.92181396484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03358595445752144,
"min": -0.007329344283789396,
"max": 0.47765645384788513,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.5048246383667,
"min": -2.0155696868896484,
"max": 113.68223571777344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06819572646885977,
"min": 0.06329972434676685,
"max": 0.07334274620890593,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9547401705640368,
"min": 0.4922901528428146,
"max": 1.0642148073261612,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016453607275685674,
"min": 0.000383299948966835,
"max": 0.016453607275685674,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23035050185959943,
"min": 0.004982899336568855,
"max": 0.2384712323962994,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.315340418728572e-06,
"min": 7.315340418728572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001024147658622,
"min": 0.0001024147658622,
"max": 0.0037591183469605997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024384142857143,
"min": 0.1024384142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341378000000002,
"min": 1.3886848,
"max": 2.653039400000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002535975871428572,
"min": 0.0002535975871428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035503662200000014,
"min": 0.0035503662200000014,
"max": 0.12531863606000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011308408342301846,
"min": 0.011308408342301846,
"max": 0.48652711510658264,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1583177149295807,
"min": 0.1583177149295807,
"max": 3.4056897163391113,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 298.5940594059406,
"min": 272.08181818181816,
"max": 987.7647058823529,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30158.0,
"min": 16792.0,
"max": 33707.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6607559840381145,
"min": -0.8710588725174174,
"max": 1.709727252681147,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 166.07559840381145,
"min": -26.624001689255238,
"max": 188.06999779492617,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6607559840381145,
"min": -0.8710588725174174,
"max": 1.709727252681147,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 166.07559840381145,
"min": -26.624001689255238,
"max": 188.06999779492617,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03502566572467913,
"min": 0.03461679298925446,
"max": 9.2144193333738,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.502566572467913,
"min": 3.464585807574622,
"max": 156.64512866735458,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678694939",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678699003"
},
"total": 4063.7463600270003,
"count": 1,
"self": 1.8777746780006055,
"children": {
"run_training.setup": {
"total": 0.14475956899991616,
"count": 1,
"self": 0.14475956899991616
},
"TrainerController.start_learning": {
"total": 4061.72382578,
"count": 1,
"self": 3.131588217947865,
"children": {
"TrainerController._reset_env": {
"total": 5.387584284000013,
"count": 1,
"self": 5.387584284000013
},
"TrainerController.advance": {
"total": 4053.044260323053,
"count": 64131,
"self": 3.05466760121908,
"children": {
"env_step": {
"total": 2752.2836712569588,
"count": 64131,
"self": 2579.320178312005,
"children": {
"SubprocessEnvManager._take_step": {
"total": 171.14855542691544,
"count": 64131,
"self": 8.877685316802172,
"children": {
"TorchPolicy.evaluate": {
"total": 162.27087011011326,
"count": 62554,
"self": 162.27087011011326
}
}
},
"workers": {
"total": 1.814937518038505,
"count": 64131,
"self": 0.0,
"children": {
"worker_root": {
"total": 4052.2576724559713,
"count": 64131,
"is_parallel": true,
"self": 1693.5504535850164,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032108440000229166,
"count": 1,
"is_parallel": true,
"self": 0.0013128440000400587,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018979999999828578,
"count": 8,
"is_parallel": true,
"self": 0.0018979999999828578
}
}
},
"UnityEnvironment.step": {
"total": 0.1587858740001593,
"count": 1,
"is_parallel": true,
"self": 0.0007447239997873112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000616806000152792,
"count": 1,
"is_parallel": true,
"self": 0.000616806000152792
},
"communicator.exchange": {
"total": 0.15507623300004525,
"count": 1,
"is_parallel": true,
"self": 0.15507623300004525
},
"steps_from_proto": {
"total": 0.002348111000173958,
"count": 1,
"is_parallel": true,
"self": 0.0005828180003391026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017652929998348554,
"count": 8,
"is_parallel": true,
"self": 0.0017652929998348554
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2358.707218870955,
"count": 64130,
"is_parallel": true,
"self": 48.47105964390221,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.926909965058712,
"count": 64130,
"is_parallel": true,
"self": 29.926909965058712
},
"communicator.exchange": {
"total": 2137.0491168310086,
"count": 64130,
"is_parallel": true,
"self": 2137.0491168310086
},
"steps_from_proto": {
"total": 143.26013243098532,
"count": 64130,
"is_parallel": true,
"self": 33.65056198498337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 109.60957044600195,
"count": 513040,
"is_parallel": true,
"self": 109.60957044600195
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1297.7059214648755,
"count": 64131,
"self": 6.017780109816158,
"children": {
"process_trajectory": {
"total": 197.13097583605736,
"count": 64131,
"self": 196.83707804705796,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2938977889994021,
"count": 2,
"self": 0.2938977889994021
}
}
},
"_update_policy": {
"total": 1094.557165519002,
"count": 458,
"self": 448.0128166050333,
"children": {
"TorchPPOOptimizer.update": {
"total": 646.5443489139686,
"count": 22806,
"self": 646.5443489139686
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4729994290973991e-06,
"count": 1,
"self": 1.4729994290973991e-06
},
"TrainerController._save_models": {
"total": 0.16039148199979536,
"count": 1,
"self": 0.0036048259999006405,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15678665599989472,
"count": 1,
"self": 0.15678665599989472
}
}
}
}
}
}
}