{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7972696423530579,
"min": 0.7972696423530579,
"max": 2.8693957328796387,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7621.1005859375,
"min": 7621.1005859375,
"max": 29385.482421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.898280143737793,
"min": 0.2261432409286499,
"max": 12.927705764770508,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.16455078125,
"min": 43.871788024902344,
"max": 2628.272705078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07074099625962987,
"min": 0.059892997334998865,
"max": 0.07476645705641669,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2829639850385195,
"min": 0.26898164113791767,
"max": 0.3738322852820834,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18745465488994822,
"min": 0.10992687614292235,
"max": 0.29241736554632,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7498186195597929,
"min": 0.4397075045716894,
"max": 1.4110524777103872,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.795454545454547,
"min": 3.159090909090909,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1135.0,
"min": 139.0,
"max": 1418.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.795454545454547,
"min": 3.159090909090909,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1135.0,
"min": 139.0,
"max": 1418.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709064053",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709064479"
},
"total": 425.32067691400016,
"count": 1,
"self": 0.5055158380000648,
"children": {
"run_training.setup": {
"total": 0.04904546800025855,
"count": 1,
"self": 0.04904546800025855
},
"TrainerController.start_learning": {
"total": 424.76611560799984,
"count": 1,
"self": 0.5386474889883175,
"children": {
"TrainerController._reset_env": {
"total": 3.3247430090000307,
"count": 1,
"self": 3.3247430090000307
},
"TrainerController.advance": {
"total": 420.8060031900113,
"count": 18199,
"self": 0.2492365579946636,
"children": {
"env_step": {
"total": 420.55676663201666,
"count": 18199,
"self": 269.29651549705477,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.9740685239476,
"count": 18199,
"self": 1.3922337919661913,
"children": {
"TorchPolicy.evaluate": {
"total": 149.58183473198142,
"count": 18199,
"self": 149.58183473198142
}
}
},
"workers": {
"total": 0.2861826110142829,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 423.6133918080004,
"count": 18199,
"is_parallel": true,
"self": 212.28070976200888,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008940886000345927,
"count": 1,
"is_parallel": true,
"self": 0.007243386000027385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016975000003185414,
"count": 10,
"is_parallel": true,
"self": 0.0016975000003185414
}
}
},
"UnityEnvironment.step": {
"total": 0.03647802399973443,
"count": 1,
"is_parallel": true,
"self": 0.0007041739995656826,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004101650001757662,
"count": 1,
"is_parallel": true,
"self": 0.0004101650001757662
},
"communicator.exchange": {
"total": 0.03338524499986306,
"count": 1,
"is_parallel": true,
"self": 0.03338524499986306
},
"steps_from_proto": {
"total": 0.001978440000129922,
"count": 1,
"is_parallel": true,
"self": 0.0003974599994762684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015809800006536534,
"count": 10,
"is_parallel": true,
"self": 0.0015809800006536534
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 211.33268204599153,
"count": 18198,
"is_parallel": true,
"self": 10.457894110983034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.106277033002243,
"count": 18198,
"is_parallel": true,
"self": 5.106277033002243
},
"communicator.exchange": {
"total": 163.31831855196106,
"count": 18198,
"is_parallel": true,
"self": 163.31831855196106
},
"steps_from_proto": {
"total": 32.4501923500452,
"count": 18198,
"is_parallel": true,
"self": 5.985553979107408,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.46463837093779,
"count": 181980,
"is_parallel": true,
"self": 26.46463837093779
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017040000011547818,
"count": 1,
"self": 0.00017040000011547818,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 415.4605800590725,
"count": 665379,
"is_parallel": true,
"self": 14.015236054357501,
"children": {
"process_trajectory": {
"total": 228.53604477271756,
"count": 665379,
"is_parallel": true,
"self": 228.0743462227183,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4616985499992552,
"count": 4,
"is_parallel": true,
"self": 0.4616985499992552
}
}
},
"_update_policy": {
"total": 172.90929923199747,
"count": 90,
"is_parallel": true,
"self": 47.91592303900961,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.99337619298785,
"count": 4587,
"is_parallel": true,
"self": 124.99337619298785
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09655152000004819,
"count": 1,
"self": 0.000987370000075316,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09556414999997287,
"count": 1,
"self": 0.09556414999997287
}
}
}
}
}
}
}