{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4033162593841553,
"min": 1.4033162593841553,
"max": 1.4291839599609375,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70823.96875,
"min": 69246.578125,
"max": 77808.328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.6213389121339,
"min": 80.23414634146341,
"max": 403.5725806451613,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49531.0,
"min": 48886.0,
"max": 50247.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999931.0,
"min": 49788.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999931.0,
"min": 49788.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.383114814758301,
"min": 0.039106469601392746,
"max": 2.4554741382598877,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1139.12890625,
"min": 4.81009578704834,
"max": 1479.4493408203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6732675022909334,
"min": 1.6868791743749525,
"max": 3.918494724779388,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1755.821866095066,
"min": 207.48613844811916,
"max": 2266.3204256892204,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6732675022909334,
"min": 1.6868791743749525,
"max": 3.918494724779388,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1755.821866095066,
"min": 207.48613844811916,
"max": 2266.3204256892204,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018067041301077957,
"min": 0.013448072352427214,
"max": 0.021407501588449424,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.054201123903233867,
"min": 0.02689614470485443,
"max": 0.05600913358503021,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048701976529426046,
"min": 0.023229338725407916,
"max": 0.06938012138836913,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14610592958827814,
"min": 0.04645867745081583,
"max": 0.2081403641651074,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4058488647499967e-06,
"min": 3.4058488647499967e-06,
"max": 0.00029535330154889993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.021754659424999e-05,
"min": 1.021754659424999e-05,
"max": 0.00084428536857155,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113524999999997,
"min": 0.10113524999999997,
"max": 0.19845110000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034057499999999,
"min": 0.20741394999999996,
"max": 0.58142845,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.664897499999996e-05,
"min": 6.664897499999996e-05,
"max": 0.004922709889999998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019994692499999988,
"min": 0.00019994692499999988,
"max": 0.014073279655,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671774762",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671776988"
},
"total": 2225.984653452,
"count": 1,
"self": 0.39509331499994005,
"children": {
"run_training.setup": {
"total": 0.1021809830000393,
"count": 1,
"self": 0.1021809830000393
},
"TrainerController.start_learning": {
"total": 2225.4873791540003,
"count": 1,
"self": 3.8631977839108913,
"children": {
"TrainerController._reset_env": {
"total": 7.788320384000031,
"count": 1,
"self": 7.788320384000031
},
"TrainerController.advance": {
"total": 2213.722803778089,
"count": 232236,
"self": 3.948941850032952,
"children": {
"env_step": {
"total": 1745.5287680240351,
"count": 232236,
"self": 1463.9942648122865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 279.011508031871,
"count": 232236,
"self": 14.445469112956971,
"children": {
"TorchPolicy.evaluate": {
"total": 264.56603891891405,
"count": 223039,
"self": 66.16494180789459,
"children": {
"TorchPolicy.sample_actions": {
"total": 198.40109711101945,
"count": 223039,
"self": 198.40109711101945
}
}
}
}
},
"workers": {
"total": 2.5229951798777392,
"count": 232236,
"self": 0.0,
"children": {
"worker_root": {
"total": 2217.354980584956,
"count": 232236,
"is_parallel": true,
"self": 1014.5171932018598,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020557279999593447,
"count": 1,
"is_parallel": true,
"self": 0.0003223770000886361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017333509998707086,
"count": 2,
"is_parallel": true,
"self": 0.0017333509998707086
}
}
},
"UnityEnvironment.step": {
"total": 0.027148061000048074,
"count": 1,
"is_parallel": true,
"self": 0.00029159700000036537,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016882699992493144,
"count": 1,
"is_parallel": true,
"self": 0.00016882699992493144
},
"communicator.exchange": {
"total": 0.025987855000039417,
"count": 1,
"is_parallel": true,
"self": 0.025987855000039417
},
"steps_from_proto": {
"total": 0.0006997820000833599,
"count": 1,
"is_parallel": true,
"self": 0.00024159700012660323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045818499995675666,
"count": 2,
"is_parallel": true,
"self": 0.00045818499995675666
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1202.8377873830964,
"count": 232235,
"is_parallel": true,
"self": 34.47277394817911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.96692821689328,
"count": 232235,
"is_parallel": true,
"self": 77.96692821689328
},
"communicator.exchange": {
"total": 995.8075172630678,
"count": 232235,
"is_parallel": true,
"self": 995.8075172630678
},
"steps_from_proto": {
"total": 94.59056795495621,
"count": 232235,
"is_parallel": true,
"self": 40.6131053879551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.97746256700111,
"count": 464470,
"is_parallel": true,
"self": 53.97746256700111
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.24509390402125,
"count": 232236,
"self": 5.945064607894096,
"children": {
"process_trajectory": {
"total": 148.45224846212875,
"count": 232236,
"self": 147.2895266071281,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1627218550006546,
"count": 10,
"self": 1.1627218550006546
}
}
},
"_update_policy": {
"total": 309.8477808339984,
"count": 97,
"self": 257.2678437480115,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.5799370859869,
"count": 2910,
"self": 52.5799370859869
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.699998943484388e-07,
"count": 1,
"self": 8.699998943484388e-07
},
"TrainerController._save_models": {
"total": 0.11305633800020587,
"count": 1,
"self": 0.002108389000113675,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1109479490000922,
"count": 1,
"self": 0.1109479490000922
}
}
}
}
}
}
}