{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14624297618865967,
"min": 0.14624297618865967,
"max": 1.4647560119628906,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4429.4072265625,
"min": 4429.4072265625,
"max": 44434.83984375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999961.0,
"min": 29952.0,
"max": 2999961.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999961.0,
"min": 29952.0,
"max": 2999961.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8048204183578491,
"min": -0.09897731989622116,
"max": 0.8590055704116821,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 238.22683715820312,
"min": -23.853534698486328,
"max": 261.1376953125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003933842293918133,
"min": -0.027270613238215446,
"max": 0.41345709562301636,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.1644172668457031,
"min": -8.099371910095215,
"max": 97.98933410644531,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07051334704668816,
"min": 0.06429423743355771,
"max": 0.07545404697173377,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9871868586536342,
"min": 0.48659723690472106,
"max": 1.1318107045760066,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013090788689580569,
"min": 0.00012420112438344576,
"max": 0.01626293453672487,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18327104165412797,
"min": 0.0013662123682179033,
"max": 0.24394401805087304,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5376566303380957e-06,
"min": 1.5376566303380957e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.152719282473334e-05,
"min": 2.152719282473334e-05,
"max": 0.0037177996607335,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051251904761904,
"min": 0.10051251904761904,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4071752666666666,
"min": 1.3897045333333333,
"max": 2.737489833333334,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.120065285714288e-05,
"min": 6.120065285714288e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008568091400000004,
"min": 0.0008568091400000004,
"max": 0.12394272335000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004076735116541386,
"min": 0.004063901957124472,
"max": 0.3282770812511444,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05707429349422455,
"min": 0.05689462646842003,
"max": 2.2979395389556885,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 220.21875,
"min": 217.84285714285716,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28188.0,
"min": 15984.0,
"max": 33669.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.779781237593852,
"min": -1.0000000521540642,
"max": 1.779781237593852,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 227.81199841201305,
"min": -32.000001668930054,
"max": 245.50119867920876,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.779781237593852,
"min": -1.0000000521540642,
"max": 1.779781237593852,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 227.81199841201305,
"min": -32.000001668930054,
"max": 245.50119867920876,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.00933070190561125,
"min": 0.009300382798619467,
"max": 6.441338911652565,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.19432984391824,
"min": 1.19432984391824,
"max": 103.06142258644104,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731688232",
"python_version": "3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\songo\\Documents\\Python\\DeepRLCourse\\Scripts\\mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./build/UnityEnvironment --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1731692835"
},
"total": 4603.306294100001,
"count": 1,
"self": 0.25035030000435654,
"children": {
"run_training.setup": {
"total": 0.1570873999989999,
"count": 1,
"self": 0.1570873999989999
},
"TrainerController.start_learning": {
"total": 4602.898856399997,
"count": 1,
"self": 3.2654007006676693,
"children": {
"TrainerController._reset_env": {
"total": 4.993731700000353,
"count": 1,
"self": 4.993731700000353
},
"TrainerController.advance": {
"total": 4594.527222799328,
"count": 194290,
"self": 2.8569732006544655,
"children": {
"env_step": {
"total": 2418.813839399236,
"count": 194290,
"self": 1775.8495642986854,
"children": {
"SubprocessEnvManager._take_step": {
"total": 640.8690928998167,
"count": 194290,
"self": 9.036510999510938,
"children": {
"TorchPolicy.evaluate": {
"total": 631.8325819003057,
"count": 187570,
"self": 631.8325819003057
}
}
},
"workers": {
"total": 2.0951822007336887,
"count": 194290,
"self": 0.0,
"children": {
"worker_root": {
"total": 4595.95136709964,
"count": 194290,
"is_parallel": true,
"self": 3041.8285563000045,
"children": {
"steps_from_proto": {
"total": 0.0008488000003126217,
"count": 1,
"is_parallel": true,
"self": 0.0001990000000660075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006498000002466142,
"count": 8,
"is_parallel": true,
"self": 0.0006498000002466142
}
}
},
"UnityEnvironment.step": {
"total": 1554.1219619996355,
"count": 194290,
"is_parallel": true,
"self": 51.483499799524,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.75150880024921,
"count": 194290,
"is_parallel": true,
"self": 34.75150880024921
},
"communicator.exchange": {
"total": 1333.029609400386,
"count": 194290,
"is_parallel": true,
"self": 1333.029609400386
},
"steps_from_proto": {
"total": 134.85734399947614,
"count": 194290,
"is_parallel": true,
"self": 26.87234490111223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 107.98499909836391,
"count": 1554320,
"is_parallel": true,
"self": 107.98499909836391
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2172.856410199438,
"count": 194290,
"self": 6.307676598404214,
"children": {
"process_trajectory": {
"total": 351.15190310101025,
"count": 194290,
"self": 350.5796433010091,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5722598000011203,
"count": 6,
"self": 0.5722598000011203
}
}
},
"_update_policy": {
"total": 1815.3968305000235,
"count": 1376,
"self": 812.0426447996033,
"children": {
"TorchPPOOptimizer.update": {
"total": 1003.3541857004202,
"count": 68484,
"self": 1003.3541857004202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.000009307172149e-07,
"count": 1,
"self": 6.000009307172149e-07
},
"TrainerController._save_models": {
"total": 0.11250060000020312,
"count": 1,
"self": 0.03100059999997029,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08150000000023283,
"count": 1,
"self": 0.08150000000023283
}
}
}
}
}
}
}