{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.79984050989151,
"min": 0.79984050989151,
"max": 2.8636891841888428,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7636.876953125,
"min": 7636.876953125,
"max": 29390.04296875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.74717903137207,
"min": 0.33841487765312195,
"max": 12.74717903137207,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2485.699951171875,
"min": 65.6524887084961,
"max": 2583.771484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06805893188963333,
"min": 0.06161286963569913,
"max": 0.07754965329978743,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2722357275585333,
"min": 0.24645147854279653,
"max": 0.3877482664989371,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20492399892970625,
"min": 0.11713304019683754,
"max": 0.2836487629980433,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.819695995718825,
"min": 0.46853216078735016,
"max": 1.4004580840906677,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.386363636363637,
"min": 3.0,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1117.0,
"min": 132.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.386363636363637,
"min": 3.0,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1117.0,
"min": 132.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699349397",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699349778"
},
"total": 381.6161183879999,
"count": 1,
"self": 0.4334244540000327,
"children": {
"run_training.setup": {
"total": 0.03958878899993579,
"count": 1,
"self": 0.03958878899993579
},
"TrainerController.start_learning": {
"total": 381.14310514499994,
"count": 1,
"self": 0.5766226649921009,
"children": {
"TrainerController._reset_env": {
"total": 0.8002962040000057,
"count": 1,
"self": 0.8002962040000057
},
"TrainerController.advance": {
"total": 379.7065868270083,
"count": 18200,
"self": 0.2837908580004296,
"children": {
"env_step": {
"total": 379.4227959690079,
"count": 18200,
"self": 296.6351749239375,
"children": {
"SubprocessEnvManager._take_step": {
"total": 82.37826636203681,
"count": 18200,
"self": 1.268191909064626,
"children": {
"TorchPolicy.evaluate": {
"total": 81.11007445297219,
"count": 18200,
"self": 81.11007445297219
}
}
},
"workers": {
"total": 0.4093546830335981,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 380.0770244110572,
"count": 18200,
"is_parallel": true,
"self": 159.91524219209805,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0015555409995613445,
"count": 1,
"is_parallel": true,
"self": 0.0003600229993026005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001195518000258744,
"count": 10,
"is_parallel": true,
"self": 0.001195518000258744
}
}
},
"UnityEnvironment.step": {
"total": 0.030114588999822445,
"count": 1,
"is_parallel": true,
"self": 0.0004945000000589062,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003430699998716591,
"count": 1,
"is_parallel": true,
"self": 0.0003430699998716591
},
"communicator.exchange": {
"total": 0.02773112900013075,
"count": 1,
"is_parallel": true,
"self": 0.02773112900013075
},
"steps_from_proto": {
"total": 0.0015458899997611297,
"count": 1,
"is_parallel": true,
"self": 0.00030708999975104234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012388000000100874,
"count": 10,
"is_parallel": true,
"self": 0.0012388000000100874
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 220.16178221895916,
"count": 18199,
"is_parallel": true,
"self": 8.80121705201509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.22758422102288,
"count": 18199,
"is_parallel": true,
"self": 5.22758422102288
},
"communicator.exchange": {
"total": 177.37214505596876,
"count": 18199,
"is_parallel": true,
"self": 177.37214505596876
},
"steps_from_proto": {
"total": 28.760835889952432,
"count": 18199,
"is_parallel": true,
"self": 5.628736822942301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.13209906701013,
"count": 181990,
"is_parallel": true,
"self": 23.13209906701013
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00038570899960177485,
"count": 1,
"self": 0.00038570899960177485,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 376.7488000910771,
"count": 431865,
"is_parallel": true,
"self": 7.867518565124556,
"children": {
"process_trajectory": {
"total": 215.23516756395247,
"count": 431865,
"is_parallel": true,
"self": 214.80835397295323,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4268135909992452,
"count": 4,
"is_parallel": true,
"self": 0.4268135909992452
}
}
},
"_update_policy": {
"total": 153.64611396200007,
"count": 90,
"is_parallel": true,
"self": 44.58889974500789,
"children": {
"TorchPPOOptimizer.update": {
"total": 109.05721421699218,
"count": 4584,
"is_parallel": true,
"self": 109.05721421699218
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.05921373999990465,
"count": 1,
"self": 0.0008984599999166676,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05831527999998798,
"count": 1,
"self": 0.05831527999998798
}
}
}
}
}
}
}