{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9148550629615784,
"min": 0.9148550629615784,
"max": 2.871063709259033,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8775.2900390625,
"min": 8775.2900390625,
"max": 29592.052734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.279151916503906,
"min": 0.3671874701976776,
"max": 13.279151916503906,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2589.4345703125,
"min": 71.23436737060547,
"max": 2705.75341796875,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 0.09546870738267899,
"min": -0.016660604625940323,
"max": 0.14152777194976807,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 18.616397857666016,
"min": -3.2321572303771973,
"max": 28.998111724853516,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06405558330623214,
"min": 0.06131177520457605,
"max": 0.07274797403906454,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25622233322492854,
"min": 0.2452471008183042,
"max": 0.3578633572468985,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.09076593071222307,
"min": 0.06346269041886463,
"max": 0.14713705880384823,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.36306372284889227,
"min": 0.2538507616754585,
"max": 0.7356852940192411,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.347009730600001e-05,
"min": 1.347009730600001e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400004e-05,
"min": 5.388038922400004e-05,
"max": 0.00230860003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.048958304703381715,
"min": 0.04579419151459839,
"max": 0.15781561710743924,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.19583321881352686,
"min": 0.18317676605839356,
"max": 0.631262468429757,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 0.7864058046948676,
"min": 0.7864058046948676,
"max": 2.746093947440386,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 3.1456232187794706,
"min": 3.1456232187794706,
"max": 12.928630973778517,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.204545454545453,
"min": 3.5681818181818183,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1153.0,
"min": 157.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.204545454545453,
"min": 3.5681818181818183,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1153.0,
"min": 157.0,
"max": 1437.0,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 0.20874647542156957,
"min": 0.18558931982737373,
"max": 0.3610852147791196,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 9.18484491854906,
"min": 8.369245985755697,
"max": 18.326926595065743,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679309527",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --force --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679310023"
},
"total": 496.126232927,
"count": 1,
"self": 0.4321505809999735,
"children": {
"run_training.setup": {
"total": 0.10164176199998565,
"count": 1,
"self": 0.10164176199998565
},
"TrainerController.start_learning": {
"total": 495.59244058400003,
"count": 1,
"self": 0.5057200699996542,
"children": {
"TrainerController._reset_env": {
"total": 5.669090515999983,
"count": 1,
"self": 5.669090515999983
},
"TrainerController.advance": {
"total": 489.27926407600035,
"count": 18207,
"self": 0.26358151098952476,
"children": {
"env_step": {
"total": 489.0156825650108,
"count": 18207,
"self": 374.3602393810067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.40971856100134,
"count": 18207,
"self": 2.4003941179956314,
"children": {
"TorchPolicy.evaluate": {
"total": 112.00932444300571,
"count": 18207,
"self": 112.00932444300571
}
}
},
"workers": {
"total": 0.2457246230027863,
"count": 18207,
"self": 0.0,
"children": {
"worker_root": {
"total": 494.29986508100575,
"count": 18207,
"is_parallel": true,
"self": 261.6230163290066,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002236798000012641,
"count": 1,
"is_parallel": true,
"self": 0.000609027999985301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016277700000273398,
"count": 10,
"is_parallel": true,
"self": 0.0016277700000273398
}
}
},
"UnityEnvironment.step": {
"total": 0.036456725999983064,
"count": 1,
"is_parallel": true,
"self": 0.00034808499987093455,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038980500005436625,
"count": 1,
"is_parallel": true,
"self": 0.00038980500005436625
},
"communicator.exchange": {
"total": 0.034627486000033514,
"count": 1,
"is_parallel": true,
"self": 0.034627486000033514
},
"steps_from_proto": {
"total": 0.0010913500000242493,
"count": 1,
"is_parallel": true,
"self": 0.00023414499992213678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008572050001021125,
"count": 10,
"is_parallel": true,
"self": 0.0008572050001021125
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 232.67684875199916,
"count": 18206,
"is_parallel": true,
"self": 9.171589949998577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.068321562999074,
"count": 18206,
"is_parallel": true,
"self": 5.068321562999074
},
"communicator.exchange": {
"total": 188.43024679299663,
"count": 18206,
"is_parallel": true,
"self": 188.43024679299663
},
"steps_from_proto": {
"total": 30.006690446004882,
"count": 18206,
"is_parallel": true,
"self": 5.899886635006624,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.106803810998258,
"count": 182060,
"is_parallel": true,
"self": 24.106803810998258
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011686300001656491,
"count": 1,
"self": 0.00011686300001656491,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 486.4422729110031,
"count": 354380,
"is_parallel": true,
"self": 8.490131191010732,
"children": {
"process_trajectory": {
"total": 253.2704820709924,
"count": 354380,
"is_parallel": true,
"self": 252.10741830299247,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1630637679999154,
"count": 4,
"is_parallel": true,
"self": 1.1630637679999154
}
}
},
"_update_policy": {
"total": 224.68165964899998,
"count": 90,
"is_parallel": true,
"self": 138.39125454399885,
"children": {
"TorchPPOOptimizer.update": {
"total": 86.29040510500113,
"count": 4584,
"is_parallel": true,
"self": 86.29040510500113
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1382490590000316,
"count": 1,
"self": 0.0010270330000139438,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13722202600001765,
"count": 1,
"self": 0.13722202600001765
}
}
}
}
}
}
}