ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6246396899223328,
"min": 0.6197971105575562,
"max": 1.5126522779464722,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18829.138671875,
"min": 18445.162109375,
"max": 45887.8203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5144776105880737,
"min": -0.15559345483779907,
"max": 0.5316869616508484,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 140.452392578125,
"min": -37.342430114746094,
"max": 147.27728271484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.07305539399385452,
"min": -0.07305539399385452,
"max": 0.21210691332817078,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -19.944122314453125,
"min": -19.944122314453125,
"max": 50.90565872192383,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06673389749942414,
"min": 0.06384169656338677,
"max": 0.07474782599837496,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.934274564991938,
"min": 0.4980725438712732,
"max": 1.0650872443460258,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012759620974109221,
"min": 6.04095597939952e-05,
"max": 0.013907077075384137,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1786346936375291,
"min": 0.0008457338371159328,
"max": 0.20834170208827876,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.451861801792859e-06,
"min": 7.451861801792859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010432606522510002,
"min": 0.00010432606522510002,
"max": 0.0035071934309355997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248392142857143,
"min": 0.10248392142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347749,
"min": 1.3886848,
"max": 2.5690644,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025814375071428583,
"min": 0.00025814375071428583,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036140125100000016,
"min": 0.0036140125100000016,
"max": 0.11692953356,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008529199287295341,
"min": 0.008438932709395885,
"max": 0.37847355008125305,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11940878629684448,
"min": 0.11814506351947784,
"max": 2.6493148803710938,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 374.2151898734177,
"min": 364.44444444444446,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29563.0,
"min": 15984.0,
"max": 32728.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6004582058779802,
"min": -1.0000000521540642,
"max": 1.6004582058779802,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 126.43619826436043,
"min": -30.999801620841026,
"max": 128.47859855741262,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6004582058779802,
"min": -1.0000000521540642,
"max": 1.6004582058779802,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 126.43619826436043,
"min": -30.999801620841026,
"max": 128.47859855741262,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03337040053469813,
"min": 0.03337040053469813,
"max": 7.749784346669912,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.636261642241152,
"min": 2.636261642241152,
"max": 123.9965495467186,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698498925",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698501039"
},
"total": 2114.1815712210005,
"count": 1,
"self": 1.1427680160009004,
"children": {
"run_training.setup": {
"total": 0.052593939999951544,
"count": 1,
"self": 0.052593939999951544
},
"TrainerController.start_learning": {
"total": 2112.9862092649996,
"count": 1,
"self": 1.2166943039087528,
"children": {
"TrainerController._reset_env": {
"total": 3.8433889519997138,
"count": 1,
"self": 3.8433889519997138
},
"TrainerController.advance": {
"total": 2107.814769991091,
"count": 63558,
"self": 1.3189337740109295,
"children": {
"env_step": {
"total": 1478.7869760530716,
"count": 63558,
"self": 1351.3888449880378,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.62905455390728,
"count": 63558,
"self": 4.502868956939437,
"children": {
"TorchPolicy.evaluate": {
"total": 122.12618559696784,
"count": 62560,
"self": 122.12618559696784
}
}
},
"workers": {
"total": 0.7690765111265137,
"count": 63558,
"self": 0.0,
"children": {
"worker_root": {
"total": 2108.462496060061,
"count": 63558,
"is_parallel": true,
"self": 866.6521580960743,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022104560002844664,
"count": 1,
"is_parallel": true,
"self": 0.0006285180011218472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015819379991626192,
"count": 8,
"is_parallel": true,
"self": 0.0015819379991626192
}
}
},
"UnityEnvironment.step": {
"total": 0.09347373400032666,
"count": 1,
"is_parallel": true,
"self": 0.0006404179998753534,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048926300041785,
"count": 1,
"is_parallel": true,
"self": 0.00048926300041785
},
"communicator.exchange": {
"total": 0.09065824900017105,
"count": 1,
"is_parallel": true,
"self": 0.09065824900017105
},
"steps_from_proto": {
"total": 0.0016858039998624008,
"count": 1,
"is_parallel": true,
"self": 0.00037898600021435414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013068179996480467,
"count": 8,
"is_parallel": true,
"self": 0.0013068179996480467
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1241.8103379639865,
"count": 63557,
"is_parallel": true,
"self": 34.379278015023374,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.919968506048008,
"count": 63557,
"is_parallel": true,
"self": 23.919968506048008
},
"communicator.exchange": {
"total": 1086.5193277379803,
"count": 63557,
"is_parallel": true,
"self": 1086.5193277379803
},
"steps_from_proto": {
"total": 96.99176370493478,
"count": 63557,
"is_parallel": true,
"self": 18.971812437144763,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.01995126779002,
"count": 508456,
"is_parallel": true,
"self": 78.01995126779002
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.7088601640085,
"count": 63558,
"self": 2.47260705603685,
"children": {
"process_trajectory": {
"total": 118.36777949097814,
"count": 63558,
"self": 118.16682913497789,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20095035600024858,
"count": 2,
"self": 0.20095035600024858
}
}
},
"_update_policy": {
"total": 506.86847361699347,
"count": 450,
"self": 302.4250503361309,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.4434232808626,
"count": 22779,
"self": 204.4434232808626
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.247000000148546e-06,
"count": 1,
"self": 1.247000000148546e-06
},
"TrainerController._save_models": {
"total": 0.11135477100015123,
"count": 1,
"self": 0.001853171999755432,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1095015990003958,
"count": 1,
"self": 0.1095015990003958
}
}
}
}
}
}
}