{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.410196304321289,
"min": 1.410196304321289,
"max": 1.4282119274139404,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70327.8984375,
"min": 69285.09375,
"max": 77354.8984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.57531760435572,
"min": 80.24675324675324,
"max": 385.24615384615385,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49356.0,
"min": 49214.0,
"max": 50082.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49561.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49561.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3875792026519775,
"min": 0.1267990618944168,
"max": 2.466287612915039,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1315.55615234375,
"min": 16.357078552246094,
"max": 1453.3931884765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6999197679076565,
"min": 1.8566290491311126,
"max": 4.016564288073116,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2038.6557921171188,
"min": 239.5051473379135,
"max": 2319.503956437111,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6999197679076565,
"min": 1.8566290491311126,
"max": 4.016564288073116,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2038.6557921171188,
"min": 239.5051473379135,
"max": 2319.503956437111,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016785664005308513,
"min": 0.013990444232735577,
"max": 0.020199262915088587,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05035699201592554,
"min": 0.030843315727543084,
"max": 0.060597788745265765,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0541473965264029,
"min": 0.022702645417302846,
"max": 0.06311731984217962,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1624421895792087,
"min": 0.04540529083460569,
"max": 0.18935195952653885,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.458398847233325e-06,
"min": 3.458398847233325e-06,
"max": 0.000295292626569125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0375196541699975e-05,
"min": 1.0375196541699975e-05,
"max": 0.00084404266865245,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115276666666667,
"min": 0.10115276666666667,
"max": 0.198430875,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034583,
"min": 0.20750065,
"max": 0.58134755,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.752305666666654e-05,
"min": 6.752305666666654e-05,
"max": 0.0049217006625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002025691699999996,
"min": 0.0002025691699999996,
"max": 0.014069242745000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702661456",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702663990"
},
"total": 2534.074081449,
"count": 1,
"self": 0.44371237600034874,
"children": {
"run_training.setup": {
"total": 0.05114397199997711,
"count": 1,
"self": 0.05114397199997711
},
"TrainerController.start_learning": {
"total": 2533.579225101,
"count": 1,
"self": 4.667168503018729,
"children": {
"TrainerController._reset_env": {
"total": 3.2252808480000112,
"count": 1,
"self": 3.2252808480000112
},
"TrainerController.advance": {
"total": 2525.578315334981,
"count": 232573,
"self": 4.894461345959371,
"children": {
"env_step": {
"total": 2011.6513324879822,
"count": 232573,
"self": 1667.896120232203,
"children": {
"SubprocessEnvManager._take_step": {
"total": 340.7415618619277,
"count": 232573,
"self": 17.561886632799883,
"children": {
"TorchPolicy.evaluate": {
"total": 323.17967522912784,
"count": 222953,
"self": 323.17967522912784
}
}
},
"workers": {
"total": 3.0136503938516626,
"count": 232573,
"self": 0.0,
"children": {
"worker_root": {
"total": 2525.8722689128936,
"count": 232573,
"is_parallel": true,
"self": 1173.2819196747398,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006747250000671556,
"count": 1,
"is_parallel": true,
"self": 0.00023102800003016455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004436970000369911,
"count": 2,
"is_parallel": true,
"self": 0.0004436970000369911
}
}
},
"UnityEnvironment.step": {
"total": 0.03130676700004642,
"count": 1,
"is_parallel": true,
"self": 0.000336960000140607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002450849999604543,
"count": 1,
"is_parallel": true,
"self": 0.0002450849999604543
},
"communicator.exchange": {
"total": 0.029994890000011765,
"count": 1,
"is_parallel": true,
"self": 0.029994890000011765
},
"steps_from_proto": {
"total": 0.0007298319999335945,
"count": 1,
"is_parallel": true,
"self": 0.00021907600000758976,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005107559999260047,
"count": 2,
"is_parallel": true,
"self": 0.0005107559999260047
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.5903492381537,
"count": 232572,
"is_parallel": true,
"self": 41.8287449781933,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.20865759001504,
"count": 232572,
"is_parallel": true,
"self": 87.20865759001504
},
"communicator.exchange": {
"total": 1129.4069545109764,
"count": 232572,
"is_parallel": true,
"self": 1129.4069545109764
},
"steps_from_proto": {
"total": 94.14599215896897,
"count": 232572,
"is_parallel": true,
"self": 34.97826694000935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.16772521895962,
"count": 465144,
"is_parallel": true,
"self": 59.16772521895962
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 509.0325215010396,
"count": 232573,
"self": 7.196402642107273,
"children": {
"process_trajectory": {
"total": 163.62892595493497,
"count": 232573,
"self": 162.36331379493538,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2656121599995913,
"count": 10,
"self": 1.2656121599995913
}
}
},
"_update_policy": {
"total": 338.20719290399734,
"count": 97,
"self": 273.32955850599956,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.87763439799778,
"count": 2910,
"self": 64.87763439799778
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0150001799047459e-06,
"count": 1,
"self": 1.0150001799047459e-06
},
"TrainerController._save_models": {
"total": 0.1084593999999015,
"count": 1,
"self": 0.001878084000054514,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10658131599984699,
"count": 1,
"self": 0.10658131599984699
}
}
}
}
}
}
}