ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401579737663269,
"min": 1.401579737663269,
"max": 1.427011489868164,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70893.3046875,
"min": 67682.4296875,
"max": 77200.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 95.63442940038685,
"min": 91.40147329650092,
"max": 412.8181818181818,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49443.0,
"min": 48760.0,
"max": 50195.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999962.0,
"min": 49825.0,
"max": 1999962.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999962.0,
"min": 49825.0,
"max": 1999962.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3821113109588623,
"min": 0.11002949625253677,
"max": 2.446507215499878,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1231.551513671875,
"min": 13.203539848327637,
"max": 1299.28759765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7215139974247324,
"min": 1.6712559215724467,
"max": 3.9338517560292097,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1924.0227366685867,
"min": 200.55071058869362,
"max": 2015.481406211853,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7215139974247324,
"min": 1.6712559215724467,
"max": 3.9338517560292097,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1924.0227366685867,
"min": 200.55071058869362,
"max": 2015.481406211853,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017080290178354416,
"min": 0.012400016612567318,
"max": 0.020006948475687146,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05124087053506325,
"min": 0.024800033225134636,
"max": 0.0565607911577293,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059344367020659976,
"min": 0.02235854342579842,
"max": 0.059344367020659976,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17803310106197992,
"min": 0.04471708685159684,
"max": 0.17803310106197992,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.13754895418333e-06,
"min": 3.13754895418333e-06,
"max": 0.000295357276547575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.41264686254999e-06,
"min": 9.41264686254999e-06,
"max": 0.0008443378685540498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10104581666666669,
"min": 0.10104581666666669,
"max": 0.19845242500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031374500000001,
"min": 0.2072778,
"max": 0.5814459499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.218625166666662e-05,
"min": 6.218625166666662e-05,
"max": 0.004922776007499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018655875499999988,
"min": 0.00018655875499999988,
"max": 0.014074152904999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670624306",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670626718"
},
"total": 2411.935393884,
"count": 1,
"self": 0.3920809279993591,
"children": {
"run_training.setup": {
"total": 0.18210540600000513,
"count": 1,
"self": 0.18210540600000513
},
"TrainerController.start_learning": {
"total": 2411.3612075500005,
"count": 1,
"self": 4.484078541052895,
"children": {
"TrainerController._reset_env": {
"total": 11.15118441800007,
"count": 1,
"self": 11.15118441800007
},
"TrainerController.advance": {
"total": 2395.6096339369474,
"count": 231818,
"self": 4.421909898806007,
"children": {
"env_step": {
"total": 1899.1034585980594,
"count": 231818,
"self": 1594.650417645129,
"children": {
"SubprocessEnvManager._take_step": {
"total": 301.51920069792095,
"count": 231818,
"self": 16.2980704349917,
"children": {
"TorchPolicy.evaluate": {
"total": 285.22113026292925,
"count": 222978,
"self": 70.69815062596194,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.5229796369673,
"count": 222978,
"self": 214.5229796369673
}
}
}
}
},
"workers": {
"total": 2.933840255009386,
"count": 231818,
"self": 0.0,
"children": {
"worker_root": {
"total": 2402.7138792839346,
"count": 231818,
"is_parallel": true,
"self": 1101.7182889668939,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004748690999917926,
"count": 1,
"is_parallel": true,
"self": 0.0004691749999210515,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004279515999996875,
"count": 2,
"is_parallel": true,
"self": 0.004279515999996875
}
}
},
"UnityEnvironment.step": {
"total": 0.028282058000058896,
"count": 1,
"is_parallel": true,
"self": 0.00029278399995291693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002011150000953421,
"count": 1,
"is_parallel": true,
"self": 0.0002011150000953421
},
"communicator.exchange": {
"total": 0.026987275000010413,
"count": 1,
"is_parallel": true,
"self": 0.026987275000010413
},
"steps_from_proto": {
"total": 0.0008008840000002238,
"count": 1,
"is_parallel": true,
"self": 0.00027488699993227783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005259970000679459,
"count": 2,
"is_parallel": true,
"self": 0.0005259970000679459
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1300.9955903170408,
"count": 231817,
"is_parallel": true,
"self": 36.23590219388234,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.2504391300655,
"count": 231817,
"is_parallel": true,
"self": 81.2504391300655
},
"communicator.exchange": {
"total": 1082.785469794099,
"count": 231817,
"is_parallel": true,
"self": 1082.785469794099
},
"steps_from_proto": {
"total": 100.72377919899384,
"count": 231817,
"is_parallel": true,
"self": 43.37857772695634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.3452014720375,
"count": 463634,
"is_parallel": true,
"self": 57.3452014720375
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 492.08426544008194,
"count": 231818,
"self": 7.0146492500738304,
"children": {
"process_trajectory": {
"total": 159.4365259680078,
"count": 231818,
"self": 158.95027861300787,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4862473549999322,
"count": 4,
"self": 0.4862473549999322
}
}
},
"_update_policy": {
"total": 325.6330902220003,
"count": 97,
"self": 270.02341242701596,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.60967779498435,
"count": 2910,
"self": 55.60967779498435
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5400000847876072e-06,
"count": 1,
"self": 1.5400000847876072e-06
},
"TrainerController._save_models": {
"total": 0.11630911400015975,
"count": 1,
"self": 0.0024477780002598593,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1138613359998999,
"count": 1,
"self": 0.1138613359998999
}
}
}
}
}
}
}
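
The JSON above is the ML-Agents gauge and timer dump for this training run. Below is a minimal sketch, not part of the run artifacts, of how such a file could be read back with the Python standard library; the relative path "run_logs/timers.json" is an assumption and should be adjusted to wherever the file actually lives.

# Illustrative only: load timers.json and print the gauges plus the timer tree.
import json

with open("run_logs/timers.json") as f:  # assumed path, not recorded in the file itself
    timers = json.load(f)

# "gauges" holds per-metric summaries: value, min, max, and count of summary periods.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree: every node carries total/count/self seconds and optional children.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

Read this way, the file shows 40 gauge summary periods over roughly 2.0M environment steps, and the 2411.9 s wall-clock total under "children" split mainly between environment stepping (env_step), trajectory processing (process_trajectory), and PPO updates (TorchPPOOptimizer.update).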