Shresthadev403 committed
Commit f41ee1f
1 Parent(s): 2b034f8

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
 It achieves the following results on the evaluation set:
-- eval_loss: 4.0309
-- eval_accuracy: 0.5498
-- eval_runtime: 155.2257
-- eval_samples_per_second: 97.6
-- eval_steps_per_second: 6.101
-- epoch: 3.17
-- step: 3000
+- eval_loss: 3.6342
+- eval_accuracy: 0.6112
+- eval_runtime: 156.2293
+- eval_samples_per_second: 96.973
+- eval_steps_per_second: 6.062
+- epoch: 4.22
+- step: 4000
 
 ## Model description
 
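As a quick check of the updated card, the snippet below is a minimal inference sketch for this checkpoint. The repo id `Shresthadev403/food-image-classification` is an assumption inferred from the username and the checkpoint path in trainer_state.json, and the image path is a placeholder; it assumes `transformers` and `Pillow` are installed.

```python
# Minimal inference sketch (not part of this commit). The repo id and the
# image path below are assumptions, not values confirmed by the diff.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",  # assumed repo id
)

image = Image.open("example_dish.jpg")  # placeholder local image
for pred in classifier(image, top_k=5):
    print(f"{pred['label']}: {pred['score']:.3f}")
```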
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2aafa514affb39eef02d6b09c36373690891aa8b9b61a367c6de4cac2cd4e3d2
+oid sha256:a574228757dbeb58dda7eb7c5137d9c6fe0952f3ba4faf5b4a1392ad3aa32e3c
 size 343528508
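The Git LFS pointer swaps the SHA-256 object id while the file size stays the same. A minimal sketch of checking a locally downloaded `model.safetensors` against the new oid might look like this (the local path is a placeholder):

```python
# Sketch: recompute the SHA-256 of a downloaded model.safetensors and compare
# it with the oid from the updated LFS pointer. Local path is a placeholder.
import hashlib

EXPECTED_OID = "a574228757dbeb58dda7eb7c5137d9c6fe0952f3ba4faf5b4a1392ad3aa32e3c"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of("model.safetensors")
print("OK" if actual == EXPECTED_OID else f"mismatch: {actual}")
```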
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d0f28c711aacbeaf928d569dcf9e4e65de6f0e93246ad395072bd5a36a7abdf
-size 10741
+oid sha256:9bad0bdc4c25c75c7a63f8ccf285a91b4e697e6615fec6b0cbdfeb86d6c9e4fb
+size 11221
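This event file is also an LFS pointer. Once the run directory has been pulled locally, a sketch like the one below, using TensorBoard's `EventAccumulator`, can list the logged scalars; the path mirrors the run directory in this commit and assumes the repo has been cloned next to the script.

```python
# Sketch: read the scalars logged in the updated TensorBoard event file.
# Assumes the repo (and its LFS objects) has been cloned locally and that
# the `tensorboard` package is installed.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb05_03-08-58_983d148b451b")
acc.Reload()

for tag in acc.Tags()["scalars"]:
    last = acc.Scalars(tag)[-1]  # most recent point for this tag
    print(f"{tag}: step={last.step}, value={last.value:.4f}")
```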
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3103630363036304,
-  "best_model_checkpoint": "food-image-classification/checkpoint-2000",
-  "epoch": 3.167898627243928,
+  "best_metric": 0.5498349834983498,
+  "best_model_checkpoint": "food-image-classification/checkpoint-3000",
+  "epoch": 4.223864836325237,
   "eval_steps": 1000,
-  "global_step": 3000,
+  "global_step": 4000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -52,6 +52,21 @@
       "eval_samples_per_second": 97.6,
       "eval_steps_per_second": 6.101,
       "step": 3000
+    },
+    {
+      "epoch": 4.22,
+      "learning_rate": 4.223864836325238e-06,
+      "loss": 3.8257,
+      "step": 4000
+    },
+    {
+      "epoch": 4.22,
+      "eval_accuracy": 0.6111551155115511,
+      "eval_loss": 3.634243965148926,
+      "eval_runtime": 156.2293,
+      "eval_samples_per_second": 96.973,
+      "eval_steps_per_second": 6.062,
+      "step": 4000
     }
   ],
   "logging_steps": 1000,
@@ -59,7 +74,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 500,
   "save_steps": 1000,
-  "total_flos": 1.4889802291970458e+19,
+  "total_flos": 1.9853069722627277e+19,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null