Shresthadev403 committed on
Commit 32067e5
1 Parent(s): 6b2101c

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 1.1283
- - eval_accuracy: 0.7832
- - eval_runtime: 153.8564
- - eval_samples_per_second: 98.468
- - eval_steps_per_second: 6.155
- - epoch: 11.62
- - step: 11000
+ - eval_loss: 0.9767
+ - eval_accuracy: 0.7947
+ - eval_runtime: 155.0819
+ - eval_samples_per_second: 97.69
+ - eval_steps_per_second: 6.106
+ - epoch: 12.67
+ - step: 12000
 
  ## Model description
 
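For context on what the updated README describes, here is a minimal inference sketch that loads the fine-tuned checkpoint with the `transformers` image-classification pipeline. The repo id `Shresthadev403/food-image-classification` is an assumption inferred from the committer name and the checkpoint path in `trainer_state.json`; the actual Hub id may differ, and the image path is a placeholder.

```python
from transformers import pipeline
from PIL import Image

# Repo id assumed from the committer name and the "food-image-classification"
# output directory seen in trainer_state.json; adjust if the real Hub id differs.
classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",
)

image = Image.open("example_dish.jpg")  # placeholder path to any RGB food photo
for prediction in classifier(image, top_k=5):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```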
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:940b97d37a43ff1b0ab70f1b55193d1f794920824e7c9e04ea4ec3f7454c0462
+ oid sha256:c0c30cd551dee8b88adf1abb5ea9088a9c56b772faeac0a11e9a6f54d0fbf35b
  size 343528508
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8e6f5207bc35a00cff7abf7f9df9f8f5c1dceaec7dd3ab5741ddaba6acda3ba
- size 14581
+ oid sha256:f9c454d2313ba3956712f0130a2223945517cc8f21e0eb033697f07c35c2f430
+ size 15061
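The `model.safetensors` and `events.out.tfevents.*` entries above are Git LFS pointer files (`version` / `oid` / `size`), so this commit only changes the pointers, not inline binary data. A small sketch of checking a downloaded payload against its pointer, with placeholder file paths:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Check a downloaded payload against a Git LFS pointer (version / oid / size)."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].split(":", 1)[1].strip()  # "oid sha256:<hex>"
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            actual_size += len(chunk)

    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Example with placeholder paths:
# verify_lfs_pointer("model.safetensors.pointer", "model.safetensors")
```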
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7758415841584159,
- "best_model_checkpoint": "food-image-classification/checkpoint-10000",
- "epoch": 11.615628299894404,
+ "best_metric": 0.7831683168316832,
+ "best_model_checkpoint": "food-image-classification/checkpoint-11000",
+ "epoch": 12.671594508975712,
  "eval_steps": 1000,
- "global_step": 11000,
+ "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -172,6 +172,21 @@
  "eval_samples_per_second": 98.468,
  "eval_steps_per_second": 6.155,
  "step": 11000
+ },
+ {
+ "epoch": 12.67,
+ "learning_rate": 1.2671594508975712e-05,
+ "loss": 0.9312,
+ "step": 12000
+ },
+ {
+ "epoch": 12.67,
+ "eval_accuracy": 0.7946534653465347,
+ "eval_loss": 0.9766868352890015,
+ "eval_runtime": 155.0819,
+ "eval_samples_per_second": 97.69,
+ "eval_steps_per_second": 6.106,
+ "step": 12000
  }
  ],
  "logging_steps": 1000,
@@ -179,7 +194,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 500,
  "save_steps": 1000,
- "total_flos": 5.459594173722501e+19,
+ "total_flos": 5.955920916788183e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null