{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36784476041793823,
"min": 0.35954245924949646,
"max": 1.4516931772232056,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10988.2587890625,
"min": 10659.71484375,
"max": 44038.5625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6022236943244934,
"min": -0.2634536027908325,
"max": 0.7436299324035645,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 171.03152465820312,
"min": -62.438507080078125,
"max": 220.1144561767578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.021023573353886604,
"min": 0.009736931882798672,
"max": 0.3641150891780853,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.9706950187683105,
"min": 2.8626580238342285,
"max": 87.75173950195312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06760139739851535,
"min": 0.0655458662173866,
"max": 0.07251250823457199,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0140209609777302,
"min": 0.5036924442991577,
"max": 1.056789422546988,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017241750931476844,
"min": 0.000410113303039336,
"max": 0.017330982091759022,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25862626397215266,
"min": 0.004511246333432696,
"max": 0.2599647313763853,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.549617483493334e-06,
"min": 7.549617483493334e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001132442622524,
"min": 0.0001132442622524,
"max": 0.0033751207749598,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251650666666667,
"min": 0.10251650666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377476,
"min": 1.3886848,
"max": 2.6175585,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026139901600000004,
"min": 0.00026139901600000004,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003920985240000001,
"min": 0.003920985240000001,
"max": 0.11252151598,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011227217502892017,
"min": 0.011227217502892017,
"max": 0.4900347590446472,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16840825974941254,
"min": 0.16280758380889893,
"max": 3.4302432537078857,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 322.03092783505156,
"min": 254.1304347826087,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31237.0,
"min": 15984.0,
"max": 33290.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6161030739853062,
"min": -1.0000000521540642,
"max": 1.7110712899462037,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.7619981765747,
"min": -30.999201625585556,
"max": 200.03479797393084,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6161030739853062,
"min": -1.0000000521540642,
"max": 1.7110712899462037,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.7619981765747,
"min": -30.999201625585556,
"max": 200.03479797393084,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03734687535325065,
"min": 0.03147251646315335,
"max": 9.44350309111178,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6226469092653133,
"min": 3.4775229908700567,
"max": 151.09604945778847,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679669028",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679671223"
},
"total": 2195.506681027,
"count": 1,
"self": 1.1364900759999728,
"children": {
"run_training.setup": {
"total": 0.1058909500000027,
"count": 1,
"self": 0.1058909500000027
},
"TrainerController.start_learning": {
"total": 2194.2643000010003,
"count": 1,
"self": 1.3105426419751893,
"children": {
"TrainerController._reset_env": {
"total": 6.028638049000165,
"count": 1,
"self": 6.028638049000165
},
"TrainerController.advance": {
"total": 2186.781520820025,
"count": 64366,
"self": 1.4109326760226395,
"children": {
"env_step": {
"total": 1564.1126104469884,
"count": 64366,
"self": 1458.6922093669914,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.64993639600107,
"count": 64366,
"self": 4.5337851040449095,
"children": {
"TorchPolicy.evaluate": {
"total": 100.11615129195616,
"count": 62568,
"self": 100.11615129195616
}
}
},
"workers": {
"total": 0.7704646839958968,
"count": 64366,
"self": 0.0,
"children": {
"worker_root": {
"total": 2189.5206216200017,
"count": 64366,
"is_parallel": true,
"self": 842.1228176320051,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016273640001145395,
"count": 1,
"is_parallel": true,
"self": 0.0005289020004966005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001098461999617939,
"count": 8,
"is_parallel": true,
"self": 0.001098461999617939
}
}
},
"UnityEnvironment.step": {
"total": 0.04744405000019469,
"count": 1,
"is_parallel": true,
"self": 0.0005267920003007021,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004433569999946485,
"count": 1,
"is_parallel": true,
"self": 0.0004433569999946485
},
"communicator.exchange": {
"total": 0.04485337599999184,
"count": 1,
"is_parallel": true,
"self": 0.04485337599999184
},
"steps_from_proto": {
"total": 0.0016205249999075022,
"count": 1,
"is_parallel": true,
"self": 0.0003863289998662367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012341960000412655,
"count": 8,
"is_parallel": true,
"self": 0.0012341960000412655
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1347.3978039879967,
"count": 64365,
"is_parallel": true,
"self": 30.829521154053964,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.45170786697986,
"count": 64365,
"is_parallel": true,
"self": 22.45170786697986
},
"communicator.exchange": {
"total": 1203.3891572749862,
"count": 64365,
"is_parallel": true,
"self": 1203.3891572749862
},
"steps_from_proto": {
"total": 90.7274176919766,
"count": 64365,
"is_parallel": true,
"self": 19.165637774857487,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.56177991711911,
"count": 514920,
"is_parallel": true,
"self": 71.56177991711911
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.257977697014,
"count": 64366,
"self": 2.479805766977961,
"children": {
"process_trajectory": {
"total": 116.61909988103162,
"count": 64366,
"self": 116.30915141903188,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30994846199973836,
"count": 2,
"self": 0.30994846199973836
}
}
},
"_update_policy": {
"total": 502.1590720490044,
"count": 452,
"self": 318.91935329901,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.23971874999438,
"count": 22818,
"self": 183.23971874999438
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.145000169344712e-06,
"count": 1,
"self": 1.145000169344712e-06
},
"TrainerController._save_models": {
"total": 0.14359734499976184,
"count": 1,
"self": 0.0018170019998251519,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1417803429999367,
"count": 1,
"self": 0.1417803429999367
}
}
}
}
}
}
}