MLAgents-Worm / run_logs /timers.json
{
"name": "root",
"gauges": {
"Worm.Policy.Entropy.mean": {
"value": 0.7742936015129089,
"min": 0.7742936015129089,
"max": 1.423706293106079,
"count": 233
},
"Worm.Policy.Entropy.sum": {
"value": 23228.80859375,
"min": 23228.80859375,
"max": 42711.1875,
"count": 233
},
"Worm.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 233
},
"Worm.Environment.EpisodeLength.sum": {
"value": 29970.0,
"min": 29970.0,
"max": 29970.0,
"count": 233
},
"Worm.Step.mean": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Step.sum": {
"value": 6989000.0,
"min": 29000.0,
"max": 6989000.0,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.mean": {
"value": 242.0957489013672,
"min": 0.04105445370078087,
"max": 243.65689086914062,
"count": 233
},
"Worm.Policy.ExtrinsicValueEstimate.sum": {
"value": 7262.87255859375,
"min": 1.2316336631774902,
"max": 7309.70654296875,
"count": 233
},
"Worm.Environment.CumulativeReward.mean": {
"value": 1198.4819864908854,
"min": 0.22847376391291618,
"max": 1222.44765625,
"count": 233
},
"Worm.Environment.CumulativeReward.sum": {
"value": 35954.45959472656,
"min": 6.8542129173874855,
"max": 36673.4296875,
"count": 233
},
"Worm.Policy.ExtrinsicReward.mean": {
"value": 1198.4819864908854,
"min": 0.22847376391291618,
"max": 1222.44765625,
"count": 233
},
"Worm.Policy.ExtrinsicReward.sum": {
"value": 35954.45959472656,
"min": 6.8542129173874855,
"max": 36673.4296875,
"count": 233
},
"Worm.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 233
},
"Worm.Losses.PolicyLoss.mean": {
"value": 0.01606008090467575,
"min": 0.011610666393867828,
"max": 0.02387814459195527,
"count": 232
},
"Worm.Losses.PolicyLoss.sum": {
"value": 0.01606008090467575,
"min": 0.011610666393867828,
"max": 0.02387814459195527,
"count": 232
},
"Worm.Losses.ValueLoss.mean": {
"value": 13.8456388655163,
"min": 0.0014668323315813073,
"max": 15.136433987390427,
"count": 232
},
"Worm.Losses.ValueLoss.sum": {
"value": 13.8456388655163,
"min": 0.0014668323315813073,
"max": 15.136433987390427,
"count": 232
},
"Worm.Policy.LearningRate.mean": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.LearningRate.sum": {
"value": 1.7143851428571302e-06,
"min": 1.7143851428571302e-06,
"max": 0.00029871428614285713,
"count": 232
},
"Worm.Policy.Epsilon.mean": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Epsilon.sum": {
"value": 0.10057142857142858,
"min": 0.10057142857142858,
"max": 0.1995714285714285,
"count": 232
},
"Worm.Policy.Beta.mean": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
},
"Worm.Policy.Beta.sum": {
"value": 3.851428571428551e-05,
"min": 3.851428571428551e-05,
"max": 0.004978614285714285,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657389915",
"python_version": "3.8.13 (default, Mar 28 2022, 11:38:47) \n[GCC 7.5.0]",
"command_line_arguments": "/home/nm/anaconda3/envs/ml-agents/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./trained-envs-executables/linux/Worm/Worm --run-id=Worm Training --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.1",
"end_time_seconds": "1657395601"
},
"total": 5686.2653135269975,
"count": 1,
"self": 0.21825166399867157,
"children": {
"run_training.setup": {
"total": 0.009958452999853762,
"count": 1,
"self": 0.009958452999853762
},
"TrainerController.start_learning": {
"total": 5686.037103409999,
"count": 1,
"self": 6.074651844668551,
"children": {
"TrainerController._reset_env": {
"total": 2.3729323570005363,
"count": 1,
"self": 2.3729323570005363
},
"TrainerController.advance": {
"total": 5677.504107482331,
"count": 701000,
"self": 6.3626116886225645,
"children": {
"env_step": {
"total": 4741.273211906742,
"count": 701000,
"self": 4258.827497213797,
"children": {
"SubprocessEnvManager._take_step": {
"total": 478.60141630537146,
"count": 701000,
"self": 30.77560889383858,
"children": {
"TorchPolicy.evaluate": {
"total": 447.8258074115329,
"count": 701000,
"self": 100.60325676606044,
"children": {
"TorchPolicy.sample_actions": {
"total": 347.22255064547244,
"count": 701000,
"self": 347.22255064547244
}
}
}
}
},
"workers": {
"total": 3.844298387573872,
"count": 701000,
"self": 0.0,
"children": {
"worker_root": {
"total": 5671.579805337637,
"count": 701000,
"is_parallel": true,
"self": 1884.033489645517,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007374469987553312,
"count": 1,
"is_parallel": true,
"self": 0.00021347999791032635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005239670008450048,
"count": 2,
"is_parallel": true,
"self": 0.0005239670008450048
}
}
},
"UnityEnvironment.step": {
"total": 0.019578467999963323,
"count": 1,
"is_parallel": true,
"self": 0.00017259900050703436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033599800008232705,
"count": 1,
"is_parallel": true,
"self": 0.00033599800008232705
},
"communicator.exchange": {
"total": 0.018585862999316305,
"count": 1,
"is_parallel": true,
"self": 0.018585862999316305
},
"steps_from_proto": {
"total": 0.0004840080000576563,
"count": 1,
"is_parallel": true,
"self": 0.00014713000200572424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003368779980519321,
"count": 2,
"is_parallel": true,
"self": 0.0003368779980519321
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3787.5463156921196,
"count": 700999,
"is_parallel": true,
"self": 90.06624167344853,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 161.92172522699002,
"count": 700999,
"is_parallel": true,
"self": 161.92172522699002
},
"communicator.exchange": {
"total": 3337.9721602264053,
"count": 700999,
"is_parallel": true,
"self": 3337.9721602264053
},
"steps_from_proto": {
"total": 197.58618856527573,
"count": 700999,
"is_parallel": true,
"self": 67.12977652093468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 130.45641204434105,
"count": 1401998,
"is_parallel": true,
"self": 130.45641204434105
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 929.8682838869663,
"count": 701000,
"self": 6.682292772185974,
"children": {
"process_trajectory": {
"total": 243.04347511178094,
"count": 701000,
"self": 241.94448678477966,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0989883270012797,
"count": 14,
"self": 1.0989883270012797
}
}
},
"_update_policy": {
"total": 680.1425160029994,
"count": 233,
"self": 585.2013225309984,
"children": {
"TorchPPOOptimizer.update": {
"total": 94.94119347200103,
"count": 9786,
"self": 94.94119347200103
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.09997335029766e-07,
"count": 1,
"self": 5.09997335029766e-07
},
"TrainerController._save_models": {
"total": 0.08541121600137558,
"count": 1,
"self": 0.0010737090015027206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08433750699987286,
"count": 1,
"self": 0.08433750699987286
}
}
}
}
}
}
}
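
The snippet below is a minimal sketch (not part of the recorded log) of one way to inspect this file with Python's standard json module. The relative path "run_logs/timers.json" is an assumption taken from the header above; the field names (gauges, total, count, children) are exactly those visible in the JSON. It prints each gauge's final value and then walks the hierarchical timer tree to show how the roughly 5686 seconds of wall-clock time were spent.

# Sketch only: path and usage are assumptions based on the repo layout shown above.
import json

with open("run_logs/timers.json") as f:  # assumed location of this file
    timers = json.load(f)

# Final value of each gauge recorded during training, with its min/max/count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']} "
          f"(min={gauge['min']}, max={gauge['max']}, count={gauge['count']})")

# Recursively walk the timer tree ("total" seconds, "count" calls, nested "children").
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)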