{
  "best_metric": 0.9458666666666666,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/google_vit-base-patch16-224/model_idx_0084/checkpoints/checkpoint-2664",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 2664,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.39594656229019165,
      "learning_rate": 9.621650897886543e-05,
      "loss": 0.6536,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9184,
      "eval_loss": 0.2690630555152893,
      "eval_runtime": 11.4441,
      "eval_samples_per_second": 327.68,
      "eval_steps_per_second": 5.155,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.034524980932474136,
      "learning_rate": 8.539700818671957e-05,
      "loss": 0.1537,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9293333333333333,
      "eval_loss": 0.2537638247013092,
      "eval_runtime": 11.1129,
      "eval_samples_per_second": 337.444,
      "eval_steps_per_second": 5.309,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.24914999306201935,
      "learning_rate": 6.918863377282392e-05,
      "loss": 0.0614,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9368,
      "eval_loss": 0.25443124771118164,
      "eval_runtime": 13.4522,
      "eval_samples_per_second": 278.766,
      "eval_steps_per_second": 4.386,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.15276160836219788,
      "learning_rate": 5.005896381241413e-05,
      "loss": 0.026,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9352,
      "eval_loss": 0.26045969128608704,
      "eval_runtime": 13.2744,
      "eval_samples_per_second": 282.499,
      "eval_steps_per_second": 4.445,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.007646479178220034,
      "learning_rate": 3.0920317146072576e-05,
      "loss": 0.0072,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9448,
      "eval_loss": 0.2405226230621338,
      "eval_runtime": 10.6496,
      "eval_samples_per_second": 352.126,
      "eval_steps_per_second": 5.54,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.010624893009662628,
      "learning_rate": 1.4686379236485708e-05,
      "loss": 0.0023,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9445333333333333,
      "eval_loss": 0.24376659095287323,
      "eval_runtime": 10.614,
      "eval_samples_per_second": 353.308,
      "eval_steps_per_second": 5.559,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.02559749223291874,
      "learning_rate": 3.82861996937452e-06,
      "loss": 0.0011,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9437333333333333,
      "eval_loss": 0.24647106230258942,
      "eval_runtime": 10.7083,
      "eval_samples_per_second": 350.195,
      "eval_steps_per_second": 5.51,
      "step": 2331
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.006798462010920048,
      "learning_rate": 3.476732383100867e-11,
      "loss": 0.001,
      "step": 2664
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9458666666666666,
      "eval_loss": 0.24225540459156036,
      "eval_runtime": 10.4106,
      "eval_samples_per_second": 360.208,
      "eval_steps_per_second": 5.667,
      "step": 2664
    }
  ],
  "logging_steps": 500,
  "max_steps": 2664,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.317930564980736e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}