{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.23453360795974731,
"min": 0.1953631490468979,
"max": 1.50232994556427,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 7054.77099609375,
"min": 5920.28466796875,
"max": 45574.6796875,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979915.0,
"min": 29952.0,
"max": 1979915.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979915.0,
"min": 29952.0,
"max": 1979915.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6958987712860107,
"min": -0.1269349753856659,
"max": 0.7781063914299011,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 204.59423828125,
"min": -30.083589553833008,
"max": 229.1328125,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01174519956111908,
"min": -0.017928695306181908,
"max": 0.2730512022972107,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.4530887603759766,
"min": -5.002106189727783,
"max": 65.53228759765625,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06838184033473399,
"min": 0.06509955757064745,
"max": 0.07357309589991091,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9573457646862759,
"min": 0.4990568511320503,
"max": 1.0883748224393153,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016964178403820068,
"min": 0.0001750800621381293,
"max": 0.018523617723904963,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23749849765348094,
"min": 0.00245112086993381,
"max": 0.2778542658585744,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.182523272524998e-06,
"min": 5.182523272524998e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.255532581534997e-05,
"min": 7.255532581534997e-05,
"max": 0.0038541827652724494,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.101727475,
"min": 0.101727475,
"max": 0.19919177142857142,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.42418465,
"min": 1.3943424,
"max": 2.68472755,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018257475249999997,
"min": 0.00018257475249999997,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0025560465349999993,
"min": 0.0025560465349999993,
"max": 0.128484282245,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007338341791182756,
"min": 0.006963604129850864,
"max": 0.3577478528022766,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10273678600788116,
"min": 0.10009976476430893,
"max": 2.504235029220581,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 272.32142857142856,
"min": 238.89166666666668,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30500.0,
"min": 15984.0,
"max": 33984.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7273423266303432,
"min": -1.0000000521540642,
"max": 1.7437719170723045,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 191.7349982559681,
"min": -31.99200163781643,
"max": 212.2681984603405,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7273423266303432,
"min": -1.0000000521540642,
"max": 1.7437719170723045,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 191.7349982559681,
"min": -31.99200163781643,
"max": 212.2681984603405,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02087978300577405,
"min": 0.01831468593421554,
"max": 7.145384430885315,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3176559136409196,
"min": 2.0752130590813067,
"max": 114.32615089416504,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686640391",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686645201"
},
"total": 4810.281842308999,
"count": 1,
"self": 0.47396022899920354,
"children": {
"run_training.setup": {
"total": 0.03673959800016746,
"count": 1,
"self": 0.03673959800016746
},
"TrainerController.start_learning": {
"total": 4809.771142482,
"count": 1,
"self": 2.9703759651001747,
"children": {
"TrainerController._reset_env": {
"total": 4.93024182299996,
"count": 1,
"self": 4.93024182299996
},
"TrainerController.advance": {
"total": 4801.778350416899,
"count": 128943,
"self": 2.938854371655907,
"children": {
"env_step": {
"total": 3517.5195383514674,
"count": 128943,
"self": 3285.564847483598,
"children": {
"SubprocessEnvManager._take_step": {
"total": 230.26380734010309,
"count": 128943,
"self": 9.927900544920703,
"children": {
"TorchPolicy.evaluate": {
"total": 220.33590679518238,
"count": 125059,
"self": 220.33590679518238
}
}
},
"workers": {
"total": 1.6908835277663457,
"count": 128943,
"self": 0.0,
"children": {
"worker_root": {
"total": 4798.498668241822,
"count": 128943,
"is_parallel": true,
"self": 1755.108176312635,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018128860001525027,
"count": 1,
"is_parallel": true,
"self": 0.0005956989998594509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012171870002930518,
"count": 8,
"is_parallel": true,
"self": 0.0012171870002930518
}
}
},
"UnityEnvironment.step": {
"total": 0.08455248000063875,
"count": 1,
"is_parallel": true,
"self": 0.0006043680004950147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004716359999292763,
"count": 1,
"is_parallel": true,
"self": 0.0004716359999292763
},
"communicator.exchange": {
"total": 0.08160083100028714,
"count": 1,
"is_parallel": true,
"self": 0.08160083100028714
},
"steps_from_proto": {
"total": 0.0018756449999273173,
"count": 1,
"is_parallel": true,
"self": 0.00034731800042209215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001528326999505225,
"count": 8,
"is_parallel": true,
"self": 0.001528326999505225
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3043.390491929187,
"count": 128942,
"is_parallel": true,
"self": 67.76356223221137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 49.17509011002221,
"count": 128942,
"is_parallel": true,
"self": 49.17509011002221
},
"communicator.exchange": {
"total": 2709.1360837351376,
"count": 128942,
"is_parallel": true,
"self": 2709.1360837351376
},
"steps_from_proto": {
"total": 217.315755851816,
"count": 128942,
"is_parallel": true,
"self": 43.748884456006635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 173.56687139580936,
"count": 1031536,
"is_parallel": true,
"self": 173.56687139580936
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1281.319957693776,
"count": 128943,
"self": 5.784639683560272,
"children": {
"process_trajectory": {
"total": 232.45521055223162,
"count": 128943,
"self": 231.99035041023035,
"children": {
"RLTrainer._checkpoint": {
"total": 0.464860142001271,
"count": 4,
"self": 0.464860142001271
}
}
},
"_update_policy": {
"total": 1043.080107457984,
"count": 924,
"self": 667.0150730828855,
"children": {
"TorchPPOOptimizer.update": {
"total": 376.06503437509855,
"count": 45585,
"self": 376.06503437509855
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1880001693498343e-06,
"count": 1,
"self": 1.1880001693498343e-06
},
"TrainerController._save_models": {
"total": 0.09217308900042553,
"count": 1,
"self": 0.0013682540011359379,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0908048349992896,
"count": 1,
"self": 0.0908048349992896
}
}
}
}
}
}
}