{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9051458835601807,
"min": 0.8958672881126404,
"max": 2.8793461322784424,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9120.25,
"min": 8070.8681640625,
"max": 32116.2265625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.552339553833008,
"min": 0.1313910037279129,
"max": 14.569454193115234,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1469.7862548828125,
"min": 12.744927406311035,
"max": 1497.137451171875,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 13134.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.11320754716981,
"min": 2.522727272727273,
"max": 28.772727272727273,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1490.0,
"min": 111.0,
"max": 1579.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.11320754716981,
"min": 2.522727272727273,
"max": 28.772727272727273,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1490.0,
"min": 111.0,
"max": 1579.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04314950444711619,
"min": 0.04314950444711619,
"max": 0.05254254118121667,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.08629900889423238,
"min": 0.08629900889423238,
"max": 0.15762762354365,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15211700192269156,
"min": 0.0761607040596359,
"max": 0.2938694961807307,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.3042340038453831,
"min": 0.1523214081192718,
"max": 0.8294430902775596,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0000990000000026e-06,
"min": 3.0000990000000026e-06,
"max": 0.00029604000132,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 6.000198000000005e-06,
"min": 6.000198000000005e-06,
"max": 0.0008366400211199997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101,
"min": 0.101,
"max": 0.19867999999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.202,
"min": 0.202,
"max": 0.5788800000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 5.9900000000000047e-05,
"min": 5.9900000000000047e-05,
"max": 0.004934132000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00011980000000000009,
"min": 0.00011980000000000009,
"max": 0.013946112,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681200148",
"python_version": "3.8.13 (default, Oct 21 2022, 23:50:54) \n[GCC 11.2.0]",
"command_line_arguments": "/home/chenoi1/miniconda3/envs/dxtorch/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1",
"numpy_version": "1.21.2",
"end_time_seconds": "1681200713"
},
"total": 565.5419128559988,
"count": 1,
"self": 0.16774363999502384,
"children": {
"run_training.setup": {
"total": 0.02683232400158886,
"count": 1,
"self": 0.02683232400158886
},
"TrainerController.start_learning": {
"total": 565.3473368920022,
"count": 1,
"self": 0.760026288953668,
"children": {
"TrainerController._reset_env": {
"total": 0.4120313419989543,
"count": 1,
"self": 0.4120313419989543
},
"TrainerController.advance": {
"total": 564.1072649360503,
"count": 45543,
"self": 0.3256897511309944,
"children": {
"env_step": {
"total": 563.7815751849193,
"count": 45543,
"self": 432.97451339550025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.45971468850985,
"count": 45543,
"self": 1.3622235474322224,
"children": {
"TorchPolicy.evaluate": {
"total": 129.09749114107763,
"count": 45543,
"self": 129.09749114107763
}
}
},
"workers": {
"total": 0.34734710090924636,
"count": 45543,
"self": 0.0,
"children": {
"worker_root": {
"total": 563.9830406965339,
"count": 45543,
"is_parallel": true,
"self": 237.55084753948177,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013660969998454675,
"count": 1,
"is_parallel": true,
"self": 0.000625291999313049,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007408050005324185,
"count": 10,
"is_parallel": true,
"self": 0.0007408050005324185
}
}
},
"UnityEnvironment.step": {
"total": 0.017025307002768386,
"count": 1,
"is_parallel": true,
"self": 0.00018508100401959382,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001588810009707231,
"count": 1,
"is_parallel": true,
"self": 0.0001588810009707231
},
"communicator.exchange": {
"total": 0.016050547998020193,
"count": 1,
"is_parallel": true,
"self": 0.016050547998020193
},
"steps_from_proto": {
"total": 0.0006307969997578766,
"count": 1,
"is_parallel": true,
"self": 0.0001695300052233506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000461266994534526,
"count": 10,
"is_parallel": true,
"self": 0.000461266994534526
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 326.4321931570521,
"count": 45542,
"is_parallel": true,
"self": 6.737098305497057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.0071289960505965,
"count": 45542,
"is_parallel": true,
"self": 4.0071289960505965
},
"communicator.exchange": {
"total": 295.8229425705176,
"count": 45542,
"is_parallel": true,
"self": 295.8229425705176
},
"steps_from_proto": {
"total": 19.86502328498682,
"count": 45542,
"is_parallel": true,
"self": 4.0616002690767345,
"children": {
"_process_rank_one_or_two_observation": {
"total": 15.803423015910084,
"count": 455420,
"is_parallel": true,
"self": 15.803423015910084
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0007740990004094783,
"count": 1,
"self": 0.0007740990004094783,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 563.530333786719,
"count": 249787,
"is_parallel": true,
"self": 3.260401939831354,
"children": {
"process_trajectory": {
"total": 241.44248771587445,
"count": 249787,
"is_parallel": true,
"self": 240.37768866787883,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0647990479956206,
"count": 10,
"is_parallel": true,
"self": 1.0647990479956206
}
}
},
"_update_policy": {
"total": 318.8274441310132,
"count": 113,
"is_parallel": true,
"self": 75.21916118503577,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.6082829459774,
"count": 9605,
"is_parallel": true,
"self": 243.6082829459774
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.06724022599883028,
"count": 1,
"self": 0.0004834379979001824,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0667567880009301,
"count": 1,
"self": 0.0667567880009301
}
}
}
}
}
}
}