ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402817726135254,
"min": 1.402814507484436,
"max": 1.4271652698516846,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72104.828125,
"min": 68495.5,
"max": 78638.9921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.01814882032667,
"min": 87.32155477031802,
"max": 380.8560606060606,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50151.0,
"min": 48955.0,
"max": 50273.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999953.0,
"min": 49656.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999953.0,
"min": 49656.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4101738929748535,
"min": 0.07388967275619507,
"max": 2.4915778636932373,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1328.005859375,
"min": 9.679547309875488,
"max": 1354.43310546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7165766814875734,
"min": 1.7932955040276506,
"max": 3.9434747885857986,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2047.8337514996529,
"min": 234.92171102762222,
"max": 2127.811678826809,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7165766814875734,
"min": 1.7932955040276506,
"max": 3.9434747885857986,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2047.8337514996529,
"min": 234.92171102762222,
"max": 2127.811678826809,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015378741165235018,
"min": 0.014467732715335538,
"max": 0.020647620188780517,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.030757482330470036,
"min": 0.028935465430671076,
"max": 0.05824076614808291,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0508504361535112,
"min": 0.02263830776015917,
"max": 0.058185217715799806,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1017008723070224,
"min": 0.04570263791829347,
"max": 0.16949223056435586,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.658123447324993e-06,
"min": 4.658123447324993e-06,
"max": 0.000295311001563,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.316246894649987e-06,
"min": 9.316246894649987e-06,
"max": 0.0008441995686001499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10155267500000001,
"min": 0.10155267500000001,
"max": 0.19843699999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20310535000000002,
"min": 0.20310535000000002,
"max": 0.58139985,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.747848249999992e-05,
"min": 8.747848249999992e-05,
"max": 0.0049220063,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017495696499999984,
"min": 0.00017495696499999984,
"max": 0.014071852514999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713848165",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713852954"
},
"total": 4789.0342706050005,
"count": 1,
"self": 1.0007839629997761,
"children": {
"run_training.setup": {
"total": 0.10981150899999648,
"count": 1,
"self": 0.10981150899999648
},
"TrainerController.start_learning": {
"total": 4787.923675133001,
"count": 1,
"self": 8.267076351903597,
"children": {
"TrainerController._reset_env": {
"total": 4.419998749999991,
"count": 1,
"self": 4.419998749999991
},
"TrainerController.advance": {
"total": 4775.040991082098,
"count": 232395,
"self": 8.200519821008129,
"children": {
"env_step": {
"total": 3147.6302819721095,
"count": 232395,
"self": 2636.064194080285,
"children": {
"SubprocessEnvManager._take_step": {
"total": 506.2382060099701,
"count": 232395,
"self": 33.805611331162936,
"children": {
"TorchPolicy.evaluate": {
"total": 472.43259467880716,
"count": 223065,
"self": 472.43259467880716
}
}
},
"workers": {
"total": 5.327881881854523,
"count": 232395,
"self": 0.0,
"children": {
"worker_root": {
"total": 4773.3513609488455,
"count": 232395,
"is_parallel": true,
"self": 2681.970815874953,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014057930000035412,
"count": 1,
"is_parallel": true,
"self": 0.0003430230000276424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010627699999758988,
"count": 2,
"is_parallel": true,
"self": 0.0010627699999758988
}
}
},
"UnityEnvironment.step": {
"total": 0.04798650700001872,
"count": 1,
"is_parallel": true,
"self": 0.0007205230001545715,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003669469999749708,
"count": 1,
"is_parallel": true,
"self": 0.0003669469999749708
},
"communicator.exchange": {
"total": 0.04547183699992274,
"count": 1,
"is_parallel": true,
"self": 0.04547183699992274
},
"steps_from_proto": {
"total": 0.001427199999966433,
"count": 1,
"is_parallel": true,
"self": 0.00044914799991602194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009780520000504112,
"count": 2,
"is_parallel": true,
"self": 0.0009780520000504112
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2091.3805450738923,
"count": 232394,
"is_parallel": true,
"self": 65.40288614833253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 116.41829711976129,
"count": 232394,
"is_parallel": true,
"self": 116.41829711976129
},
"communicator.exchange": {
"total": 1767.0531454089096,
"count": 232394,
"is_parallel": true,
"self": 1767.0531454089096
},
"steps_from_proto": {
"total": 142.50621639688904,
"count": 232394,
"is_parallel": true,
"self": 46.29078524773854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 96.2154311491505,
"count": 464788,
"is_parallel": true,
"self": 96.2154311491505
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1619.2101892889802,
"count": 232395,
"self": 12.874782295883733,
"children": {
"process_trajectory": {
"total": 255.67347801009907,
"count": 232395,
"self": 253.99844690409952,
"children": {
"RLTrainer._checkpoint": {
"total": 1.675031105999551,
"count": 10,
"self": 1.675031105999551
}
}
},
"_update_policy": {
"total": 1350.6619289829973,
"count": 96,
"self": 344.70918374701216,
"children": {
"TorchPPOOptimizer.update": {
"total": 1005.9527452359852,
"count": 2880,
"self": 1005.9527452359852
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3409999155555852e-06,
"count": 1,
"self": 1.3409999155555852e-06
},
"TrainerController._save_models": {
"total": 0.19560760799959098,
"count": 1,
"self": 0.00446634499985521,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19114126299973577,
"count": 1,
"self": 0.19114126299973577
}
}
}
}
}
}
}