{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4051117897033691,
"min": 1.4051117897033691,
"max": 1.429174780845642,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70868.21875,
"min": 67893.515625,
"max": 77214.734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 122.00496277915633,
"min": 101.68518518518519,
"max": 435.2086956521739,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49168.0,
"min": 48981.0,
"max": 50350.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999536.0,
"min": 49477.0,
"max": 1999536.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999536.0,
"min": 49477.0,
"max": 1999536.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.29941463470459,
"min": 0.06718219816684723,
"max": 2.379878282546997,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 926.6640625,
"min": 7.658770561218262,
"max": 1133.51708984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5068947763064364,
"min": 1.9440466803416871,
"max": 3.8347462564706802,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1413.2785948514938,
"min": 221.62132155895233,
"max": 1710.4028007388115,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5068947763064364,
"min": 1.9440466803416871,
"max": 3.8347462564706802,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1413.2785948514938,
"min": 221.62132155895233,
"max": 1710.4028007388115,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016993636454248593,
"min": 0.013419511977958286,
"max": 0.019247356598013236,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05098090936274578,
"min": 0.026896895280030246,
"max": 0.05687940339242535,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04409962478611205,
"min": 0.022889673120031755,
"max": 0.06069572145740191,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13229887435833615,
"min": 0.04577934624006351,
"max": 0.16575602777302265,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2618989127333255e-06,
"min": 3.2618989127333255e-06,
"max": 0.0002953579515473499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.785696738199977e-06,
"min": 9.785696738199977e-06,
"max": 0.0008440977186341,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108726666666663,
"min": 0.10108726666666663,
"max": 0.19845265000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032617999999999,
"min": 0.20730180000000004,
"max": 0.5813659,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.425460666666655e-05,
"min": 6.425460666666655e-05,
"max": 0.004922787235,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019276381999999966,
"min": 0.00019276381999999966,
"max": 0.01407015841,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671984434",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671986701"
},
"total": 2266.6311234610002,
"count": 1,
"self": 0.38919860900023195,
"children": {
"run_training.setup": {
"total": 0.11253593300034481,
"count": 1,
"self": 0.11253593300034481
},
"TrainerController.start_learning": {
"total": 2266.1293889189997,
"count": 1,
"self": 3.871102323890682,
"children": {
"TrainerController._reset_env": {
"total": 7.245321189999686,
"count": 1,
"self": 7.245321189999686
},
"TrainerController.advance": {
"total": 2254.898298627109,
"count": 230754,
"self": 4.045946738337989,
"children": {
"env_step": {
"total": 1787.3356595578862,
"count": 230754,
"self": 1495.1846741406548,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.50303254723394,
"count": 230754,
"self": 14.59273681641207,
"children": {
"TorchPolicy.evaluate": {
"total": 274.9102957308219,
"count": 223083,
"self": 69.02752846797557,
"children": {
"TorchPolicy.sample_actions": {
"total": 205.8827672628463,
"count": 223083,
"self": 205.8827672628463
}
}
}
}
},
"workers": {
"total": 2.6479528699974253,
"count": 230754,
"self": 0.0,
"children": {
"worker_root": {
"total": 2258.2416523389556,
"count": 230754,
"is_parallel": true,
"self": 1031.7770675418783,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017736459999468934,
"count": 1,
"is_parallel": true,
"self": 0.0003384949995961506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014351510003507428,
"count": 2,
"is_parallel": true,
"self": 0.0014351510003507428
}
}
},
"UnityEnvironment.step": {
"total": 0.03023527400000603,
"count": 1,
"is_parallel": true,
"self": 0.00027826199993796763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018047399998977198,
"count": 1,
"is_parallel": true,
"self": 0.00018047399998977198
},
"communicator.exchange": {
"total": 0.02907417300002635,
"count": 1,
"is_parallel": true,
"self": 0.02907417300002635
},
"steps_from_proto": {
"total": 0.000702365000051941,
"count": 1,
"is_parallel": true,
"self": 0.0002324170000065351,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046994800004540593,
"count": 2,
"is_parallel": true,
"self": 0.00046994800004540593
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1226.4645847970774,
"count": 230753,
"is_parallel": true,
"self": 34.910059644080775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.9509562639596,
"count": 230753,
"is_parallel": true,
"self": 80.9509562639596
},
"communicator.exchange": {
"total": 1015.1120094879584,
"count": 230753,
"is_parallel": true,
"self": 1015.1120094879584
},
"steps_from_proto": {
"total": 95.49155940107858,
"count": 230753,
"is_parallel": true,
"self": 41.503314095765745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.98824530531283,
"count": 461506,
"is_parallel": true,
"self": 53.98824530531283
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.5166923308848,
"count": 230754,
"self": 6.143675828133382,
"children": {
"process_trajectory": {
"total": 142.64098545474917,
"count": 230754,
"self": 141.47957682074912,
"children": {
"RLTrainer._checkpoint": {
"total": 1.161408634000054,
"count": 10,
"self": 1.161408634000054
}
}
},
"_update_policy": {
"total": 314.7320310480022,
"count": 97,
"self": 262.2284094870047,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.503621560997544,
"count": 2910,
"self": 52.503621560997544
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.180004442692734e-07,
"count": 1,
"self": 8.180004442692734e-07
},
"TrainerController._save_models": {
"total": 0.11466595999991114,
"count": 1,
"self": 0.0018757629995889147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11279019700032222,
"count": 1,
"self": 0.11279019700032222
}
}
}
}
}
}
}