{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43488427996635437,
"min": 0.43488427996635437,
"max": 1.4722023010253906,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13102.193359375,
"min": 13102.193359375,
"max": 44660.73046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4481279253959656,
"min": -0.14018329977989197,
"max": 0.4839859902858734,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 120.99453735351562,
"min": -33.22344207763672,
"max": 132.128173828125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.017161570489406586,
"min": -0.017161570489406586,
"max": 0.4350193440914154,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.633624076843262,
"min": -4.633624076843262,
"max": 103.0995864868164,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06949028487170358,
"min": 0.06478312003171251,
"max": 0.07169061435831003,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9728639882038502,
"min": 0.4854648618590242,
"max": 1.051295583983244,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013424040664436431,
"min": 0.00010608506057915977,
"max": 0.014305960034260412,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18793656930211003,
"min": 0.001379105787529077,
"max": 0.20028344047964577,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.558897480400005e-06,
"min": 7.558897480400005e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010582456472560007,
"min": 0.00010582456472560007,
"max": 0.0033759361746879997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251960000000003,
"min": 0.10251960000000003,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352744000000004,
"min": 1.3691136000000002,
"max": 2.4253120000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026170804000000015,
"min": 0.00026170804000000015,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003663912560000002,
"min": 0.003663912560000002,
"max": 0.11254866880000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010115930810570717,
"min": 0.010115930810570717,
"max": 0.5531641840934753,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14162303507328033,
"min": 0.14162303507328033,
"max": 3.8721492290496826,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 411.36986301369865,
"min": 392.6625,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30030.0,
"min": 15984.0,
"max": 32500.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4781805322402053,
"min": -1.0000000521540642,
"max": 1.5323099694214761,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 106.42899832129478,
"min": -32.000001668930054,
"max": 122.58479755371809,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4781805322402053,
"min": -1.0000000521540642,
"max": 1.5323099694214761,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 106.42899832129478,
"min": -32.000001668930054,
"max": 122.58479755371809,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042992014791404166,
"min": 0.04185772229320719,
"max": 11.87327560596168,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0954250649811,
"min": 3.0954250649811,
"max": 189.9724096953869,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673903618",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673905659"
},
"total": 2040.9183397279999,
"count": 1,
"self": 0.47752952799987725,
"children": {
"run_training.setup": {
"total": 0.12819366899998386,
"count": 1,
"self": 0.12819366899998386
},
"TrainerController.start_learning": {
"total": 2040.312616531,
"count": 1,
"self": 1.144449691970749,
"children": {
"TrainerController._reset_env": {
"total": 6.635410157000024,
"count": 1,
"self": 6.635410157000024
},
"TrainerController.advance": {
"total": 2032.4343087770294,
"count": 63556,
"self": 1.2100832249946052,
"children": {
"env_step": {
"total": 1352.9145740000904,
"count": 63556,
"self": 1251.5084683072068,
"children": {
"SubprocessEnvManager._take_step": {
"total": 100.69357860099785,
"count": 63556,
"self": 4.206092229947444,
"children": {
"TorchPolicy.evaluate": {
"total": 96.48748637105041,
"count": 62561,
"self": 33.0118417620738,
"children": {
"TorchPolicy.sample_actions": {
"total": 63.47564460897661,
"count": 62561,
"self": 63.47564460897661
}
}
}
}
},
"workers": {
"total": 0.7125270918857041,
"count": 63556,
"self": 0.0,
"children": {
"worker_root": {
"total": 2036.09002686207,
"count": 63556,
"is_parallel": true,
"self": 880.2520296271164,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017877259999750095,
"count": 1,
"is_parallel": true,
"self": 0.0006723129997681099,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011154130002068996,
"count": 8,
"is_parallel": true,
"self": 0.0011154130002068996
}
}
},
"UnityEnvironment.step": {
"total": 0.04536953900014851,
"count": 1,
"is_parallel": true,
"self": 0.0005180270002256293,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044544399997903383,
"count": 1,
"is_parallel": true,
"self": 0.00044544399997903383
},
"communicator.exchange": {
"total": 0.04274929100006375,
"count": 1,
"is_parallel": true,
"self": 0.04274929100006375
},
"steps_from_proto": {
"total": 0.001656776999880094,
"count": 1,
"is_parallel": true,
"self": 0.00043463800011522835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012221389997648657,
"count": 8,
"is_parallel": true,
"self": 0.0012221389997648657
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1155.8379972349537,
"count": 63555,
"is_parallel": true,
"self": 28.23294323798018,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.06711462201997,
"count": 63555,
"is_parallel": true,
"self": 23.06711462201997
},
"communicator.exchange": {
"total": 1002.3801885879593,
"count": 63555,
"is_parallel": true,
"self": 1002.3801885879593
},
"steps_from_proto": {
"total": 102.15775078699426,
"count": 63555,
"is_parallel": true,
"self": 21.526570627908313,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.63118015908594,
"count": 508440,
"is_parallel": true,
"self": 80.63118015908594
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 678.3096515519444,
"count": 63556,
"self": 2.071107793891997,
"children": {
"process_trajectory": {
"total": 146.1649301590569,
"count": 63556,
"self": 145.94714614705754,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21778401199935615,
"count": 2,
"self": 0.21778401199935615
}
}
},
"_update_policy": {
"total": 530.0736135989955,
"count": 442,
"self": 204.9283630389773,
"children": {
"TorchPPOOptimizer.update": {
"total": 325.1452505600182,
"count": 22818,
"self": 325.1452505600182
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.699998943484388e-07,
"count": 1,
"self": 8.699998943484388e-07
},
"TrainerController._save_models": {
"total": 0.09844703499993557,
"count": 1,
"self": 0.001488047999828268,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0969589870001073,
"count": 1,
"self": 0.0969589870001073
}
}
}
}
}
}
}