{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.1393401622772217,
"min": 0.8784369230270386,
"max": 1.4767630100250244,
"count": 13
},
"Pyramids.Policy.Entropy.sum": {
"value": 34289.58203125,
"min": 26816.921875,
"max": 44799.08203125,
"count": 13
},
"Pyramids.Step.mean": {
"value": 389892.0,
"min": 29952.0,
"max": 389892.0,
"count": 13
},
"Pyramids.Step.sum": {
"value": 389892.0,
"min": 29952.0,
"max": 389892.0,
"count": 13
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0811186358332634,
"min": -0.09695868194103241,
"max": -0.059484679251909256,
"count": 13
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -19.549591064453125,
"min": -23.367042541503906,
"max": -14.273317337036133,
"count": 13
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01895718090236187,
"min": 0.01834954507648945,
"max": 0.5088265538215637,
"count": 13
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.568680763244629,
"min": 4.422240257263184,
"max": 120.59188842773438,
"count": 13
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06880796320486134,
"min": 0.06713543501232867,
"max": 0.07518478184371438,
"count": 13
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9633114848680586,
"min": 0.4991555626259249,
"max": 0.9774021639682869,
"count": 13
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0011204746355431134,
"min": 0.0005412992572571332,
"max": 0.009430657141726034,
"count": 13
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01568664489760359,
"min": 0.007036890344342732,
"max": 0.06601459999208224,
"count": 13
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002625531553394285,
"min": 0.0002625531553394285,
"max": 0.00029838354339596195,
"count": 13
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.003675744174751999,
"min": 0.0020691136102954665,
"max": 0.0039696198767934,
"count": 13
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1875177142857143,
"min": 0.1875177142857143,
"max": 0.19946118095238097,
"count": 13
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.625248,
"min": 1.3897045333333333,
"max": 2.7232066,
"count": 13
},
"Pyramids.Policy.Beta.mean": {
"value": 0.008753019657142858,
"min": 0.008753019657142858,
"max": 0.009946171977142856,
"count": 13
},
"Pyramids.Policy.Beta.sum": {
"value": 0.12254227520000001,
"min": 0.06897148288,
"max": 0.13232833934000002,
"count": 13
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017215663567185402,
"min": 0.017215663567185402,
"max": 0.3760848939418793,
"count": 13
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24101929366588593,
"min": 0.24101929366588593,
"max": 2.632594347000122,
"count": 13
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 988.25,
"min": 907.4375,
"max": 999.0,
"count": 13
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31624.0,
"min": 15984.0,
"max": 32439.0,
"count": 13
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8014563031028956,
"min": -1.0000000521540642,
"max": -0.47057504556141794,
"count": 13
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -25.64660169929266,
"min": -32.000001668930054,
"max": -15.058401457965374,
"count": 13
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8014563031028956,
"min": -1.0000000521540642,
"max": -0.47057504556141794,
"count": 13
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -25.64660169929266,
"min": -32.000001668930054,
"max": -15.058401457965374,
"count": 13
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.18373357944801683,
"min": 0.18373357944801683,
"max": 7.275272320024669,
"count": 13
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.879474542336538,
"min": 5.879474542336538,
"max": 116.4043571203947,
"count": 13
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705945288",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705946209"
},
"total": 920.3789021709999,
"count": 1,
"self": 0.48357766200024344,
"children": {
"run_training.setup": {
"total": 0.05568737300018256,
"count": 1,
"self": 0.05568737300018256
},
"TrainerController.start_learning": {
"total": 919.8396371359995,
"count": 1,
"self": 0.6868668499291744,
"children": {
"TrainerController._reset_env": {
"total": 2.194167170999208,
"count": 1,
"self": 2.194167170999208
},
"TrainerController.advance": {
"total": 916.9580880060712,
"count": 25831,
"self": 0.7112355071885759,
"children": {
"env_step": {
"total": 642.4649182829326,
"count": 25831,
"self": 579.730725387155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62.30596385796434,
"count": 25831,
"self": 2.2832127269966804,
"children": {
"TorchPolicy.evaluate": {
"total": 60.02275113096766,
"count": 25699,
"self": 60.02275113096766
}
}
},
"workers": {
"total": 0.42822903781325294,
"count": 25831,
"self": 0.0,
"children": {
"worker_root": {
"total": 917.3812426111299,
"count": 25831,
"is_parallel": true,
"self": 394.60158639005203,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021349349999582046,
"count": 1,
"is_parallel": true,
"self": 0.00065407800138928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014808569985689246,
"count": 8,
"is_parallel": true,
"self": 0.0014808569985689246
}
}
},
"UnityEnvironment.step": {
"total": 0.06054217100063397,
"count": 1,
"is_parallel": true,
"self": 0.000695821000590513,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00051027100016654,
"count": 1,
"is_parallel": true,
"self": 0.00051027100016654
},
"communicator.exchange": {
"total": 0.05716970100002072,
"count": 1,
"is_parallel": true,
"self": 0.05716970100002072
},
"steps_from_proto": {
"total": 0.002166377999856195,
"count": 1,
"is_parallel": true,
"self": 0.00043967499823338585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001726703001622809,
"count": 8,
"is_parallel": true,
"self": 0.001726703001622809
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 522.7796562210779,
"count": 25830,
"is_parallel": true,
"self": 16.21746677603369,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.665922188113655,
"count": 25830,
"is_parallel": true,
"self": 11.665922188113655
},
"communicator.exchange": {
"total": 447.52332068497617,
"count": 25830,
"is_parallel": true,
"self": 447.52332068497617
},
"steps_from_proto": {
"total": 47.372946571954344,
"count": 25830,
"is_parallel": true,
"self": 9.703481001854925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.66946557009942,
"count": 206640,
"is_parallel": true,
"self": 37.66946557009942
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 273.78193421595006,
"count": 25831,
"self": 1.1323134169506375,
"children": {
"process_trajectory": {
"total": 55.33128354399378,
"count": 25831,
"self": 55.33128354399378
},
"_update_policy": {
"total": 217.31833725500564,
"count": 169,
"self": 128.64882026591204,
"children": {
"TorchPPOOptimizer.update": {
"total": 88.66951698909361,
"count": 9406,
"self": 88.66951698909361
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6189997040783055e-06,
"count": 1,
"self": 1.6189997040783055e-06
},
"TrainerController._save_models": {
"total": 0.0005134900002303766,
"count": 1,
"self": 2.7367000257072505e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0004861229999733041,
"count": 1,
"self": 0.0004861229999733041
}
}
}
}
}
}
}