Shresthadev403 committed
Commit 033d8aa
1 Parent(s): 299077a

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
 It achieves the following results on the evaluation set:
- - eval_loss: 0.8235
- - eval_accuracy: 0.8044
- - eval_runtime: 153.6348
- - eval_samples_per_second: 98.61
- - eval_steps_per_second: 6.164
- - epoch: 14.78
- - step: 14000
+ - eval_loss: 0.7959
+ - eval_accuracy: 0.8075
+ - eval_runtime: 153.9402
+ - eval_samples_per_second: 98.415
+ - eval_steps_per_second: 6.152
+ - epoch: 15.84
+ - step: 15000
 
 ## Model description
 
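For readers of the updated model card: a minimal inference sketch, assuming the checkpoint is published on the Hub as `Shresthadev403/food-image-classification` (inferred from the checkpoint path in trainer_state.json below) and that `example_dish.jpg` is a placeholder image path.

```python
# Minimal sketch, not part of the commit: load the fine-tuned ViT checkpoint
# for food101 image classification via the transformers pipeline.
# Assumptions: the Hub repo id below is correct and a local image file exists.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",  # assumed repo id
)

# Top-3 predicted food101 labels with scores for a local image.
print(classifier("example_dish.jpg", top_k=3))
```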
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:44fb177d54f7d920ebec7ed9d305e79d34fbbd63d2870afcc2ea43abebc25eb9
+ oid sha256:c1b09c44bb650673e03d905cbf2cfecd3c70f20f9fc55fff7e74ce2f82877fdb
 size 343528508
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:45434541e41ee33f030b4689849ab4629e7ae88b07aa47ea7fe666830806a396
- size 16021
+ oid sha256:c35e15cb5d3094dc101119d8841eae79ffdf984df25ffddf26551440b87fe5c8
+ size 16501
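The weight and TensorBoard event files above are stored as Git LFS pointers: `oid` is the SHA-256 of the actual blob and `size` is its length in bytes. A small sketch, assuming the updated model.safetensors has already been pulled locally, to check a download against the new pointer:

```python
# Minimal sketch: verify a pulled LFS file against the pointer in this commit.
# Assumption: "model.safetensors" exists in the current directory.
import hashlib
from pathlib import Path

expected_oid = "c1b09c44bb650673e03d905cbf2cfecd3c70f20f9fc55fff7e74ce2f82877fdb"
expected_size = 343528508

path = Path("model.safetensors")
assert path.stat().st_size == expected_size, "size does not match the LFS pointer"

sha256 = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)
assert sha256.hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("model.safetensors matches its Git LFS pointer")
```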
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
- "best_metric": 0.7984158415841585,
- "best_model_checkpoint": "food-image-classification/checkpoint-13000",
- "epoch": 14.783526927138332,
+ "best_metric": 0.8043564356435644,
+ "best_model_checkpoint": "food-image-classification/checkpoint-14000",
+ "epoch": 15.839493136219641,
  "eval_steps": 1000,
- "global_step": 14000,
+ "global_step": 15000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -217,6 +217,21 @@
  "eval_samples_per_second": 98.61,
  "eval_steps_per_second": 6.164,
  "step": 14000
+ },
+ {
+ "epoch": 15.84,
+ "learning_rate": 1.583949313621964e-05,
+ "loss": 0.6281,
+ "step": 15000
+ },
+ {
+ "epoch": 15.84,
+ "eval_accuracy": 0.8075247524752476,
+ "eval_loss": 0.7959182858467102,
+ "eval_runtime": 153.9402,
+ "eval_samples_per_second": 98.415,
+ "eval_steps_per_second": 6.152,
+ "step": 15000
  }
  ],
  "logging_steps": 1000,
@@ -224,7 +239,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 500,
  "save_steps": 1000,
- "total_flos": 6.948574402919547e+19,
+ "total_flos": 7.444901145985229e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null