Shresthadev403 committed
Commit 09faaf9
1 Parent(s): 4b3682e

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 0.7739
- - eval_accuracy: 0.8105
- - eval_runtime: 155.4026
- - eval_samples_per_second: 97.489
- - eval_steps_per_second: 6.094
- - epoch: 17.95
- - step: 17000
+ - eval_loss: 0.7518
+ - eval_accuracy: 0.8152
+ - eval_runtime: 153.2443
+ - eval_samples_per_second: 98.862
+ - eval_steps_per_second: 6.18
+ - epoch: 19.01
+ - step: 18000
 
  ## Model description
 
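For context, a minimal sketch of running inference with the fine-tuned checkpoint through the `transformers` image-classification pipeline. The repository id and image path below are assumptions, not taken from this commit; the repo id is only inferred from the `food-image-classification` checkpoint path in `trainer_state.json`.

```python
# Minimal inference sketch; repo id and image path are assumptions, not part of this commit.
from transformers import pipeline

# Wraps the fine-tuned ViT model and its image processor in a single callable.
classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",  # assumed repo id
)

# Classify a local food photo and print the top predicted food101 labels.
for prediction in classifier("example_dish.jpg"):  # placeholder image path
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```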
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e2d58135f1a03fadd71acabda7ba2210a176047b593fede337bd18ee253151c0
+ oid sha256:25eb29a43d1c3706c53a685065c6e3c7c500f7243e2151d946001a94203235e2
  size 343528508
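The weights themselves live in Git LFS; the file in the repo is only a pointer whose `oid` is the SHA-256 of the real `model.safetensors` and whose `size` is its byte count. A small sketch, assuming the full file has already been downloaded locally, of checking it against the new pointer:

```python
# Verify a downloaded model.safetensors against its Git LFS pointer (local path is an assumption).
import hashlib
import os

EXPECTED_OID = "25eb29a43d1c3706c53a685065c6e3c7c500f7243e2151d946001a94203235e2"
EXPECTED_SIZE = 343528508
path = "model.safetensors"  # assumed path to the fully downloaded weights

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

print("size matches:", os.path.getsize(path) == EXPECTED_SIZE)
print("sha256 matches:", digest.hexdigest() == EXPECTED_OID)
```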
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:172ab9543b209b7cd22070096df4fcbf55d70c33e4079593e3d0b6e069547365
- size 17470
+ oid sha256:fc9304e21c3c2f1e31882a2e82185835820969fdeb90c4999dc1436d1cf932b8
+ size 17959
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.8158415841584158,
  "best_model_checkpoint": "food-image-classification/checkpoint-16000",
- "epoch": 17.95142555438226,
+ "epoch": 19.00739176346357,
  "eval_steps": 1000,
- "global_step": 17000,
+ "global_step": 18000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -262,6 +262,21 @@
  "eval_samples_per_second": 97.489,
  "eval_steps_per_second": 6.094,
  "step": 17000
+ },
+ {
+ "epoch": 19.01,
+ "learning_rate": 1.900739176346357e-05,
+ "loss": 0.4951,
+ "step": 18000
+ },
+ {
+ "epoch": 19.01,
+ "eval_accuracy": 0.8151815181518152,
+ "eval_loss": 0.7517885565757751,
+ "eval_runtime": 153.2443,
+ "eval_samples_per_second": 98.862,
+ "eval_steps_per_second": 6.18,
+ "step": 18000
  }
  ],
  "logging_steps": 1000,
@@ -269,7 +284,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 500,
  "save_steps": 1000,
- "total_flos": 8.437554632116593e+19,
+ "total_flos": 8.933819326583316e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
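The records added in the second hunk are appended to the `log_history` list that `transformers.Trainer` writes to `trainer_state.json`. A small sketch, assuming a local copy of the file, of pulling out the best checkpoint and the latest evaluation record:

```python
# Inspect a Trainer-produced trainer_state.json (local path is an assumption).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best accuracy:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])
print("epoch / global step:", state["epoch"], state["global_step"])

# log_history mixes training logs (with "loss") and eval logs (with "eval_loss").
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
latest = eval_logs[-1]
print("latest eval accuracy:", latest["eval_accuracy"], "at step", latest["step"])
```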