Shresthadev403 committed
Commit 8d9286c
1 Parent(s): 1464f66

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 2.3822
- - eval_accuracy: 0.7071
- - eval_runtime: 155.5967
- - eval_samples_per_second: 97.367
- - eval_steps_per_second: 6.086
- - epoch: 7.39
- - step: 7000
+ - eval_loss: 1.9754
+ - eval_accuracy: 0.7336
+ - eval_runtime: 156.8793
+ - eval_samples_per_second: 96.571
+ - eval_steps_per_second: 6.036
+ - epoch: 8.45
+ - step: 8000
 
  ## Model description
 
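For readers who want to try the updated checkpoint, a minimal inference sketch follows. It assumes the fine-tuned weights are published under the repo id `Shresthadev403/food-image-classification` (inferred from the `best_model_checkpoint` path in `trainer_state.json` below; treat the repo id and the image filename as placeholders).

```python
# Minimal inference sketch. The repo id is an assumption inferred from the
# checkpoint path; "example_dish.jpg" is a placeholder for any local food photo.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",
)

# Print the top predicted food labels with their confidence scores.
for prediction in classifier("example_dish.jpg"):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```
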
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:42e59812c0f6edec89f8849798093e690a2f7634ed7bca9a5ca5578ab84fcab2
+ oid sha256:b20edeb0fe7d3b161c0a46a58cae188fa221d032d8c7766255b9a02137a73c09
  size 343528508
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5cc803c469db4aff184af8460d4a1ebbe418b0f1fdef1522e6e8903678ef5fbb
- size 12661
+ oid sha256:a033a1a586ec82c34e1ea9c30d4c0e829e843a9fd4da33e9e4e02374982ce418
+ size 13141
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
-   "best_metric": 0.687062706270627,
-   "best_model_checkpoint": "food-image-classification/checkpoint-6000",
-   "epoch": 7.391763463569166,
+   "best_metric": 0.7070627062706271,
+   "best_model_checkpoint": "food-image-classification/checkpoint-7000",
+   "epoch": 8.447729672650475,
    "eval_steps": 1000,
-   "global_step": 7000,
+   "global_step": 8000,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -112,6 +112,21 @@
        "eval_samples_per_second": 97.367,
        "eval_steps_per_second": 6.086,
        "step": 7000
+     },
+     {
+       "epoch": 8.45,
+       "learning_rate": 8.447729672650476e-06,
+       "loss": 2.1397,
+       "step": 8000
+     },
+     {
+       "epoch": 8.45,
+       "eval_accuracy": 0.7335973597359736,
+       "eval_loss": 1.975380539894104,
+       "eval_runtime": 156.8793,
+       "eval_samples_per_second": 96.571,
+       "eval_steps_per_second": 6.036,
+       "step": 8000
      }
    ],
    "logging_steps": 1000,
@@ -119,7 +134,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 500,
    "save_steps": 1000,
-   "total_flos": 3.4742872014597734e+19,
+   "total_flos": 3.970613944525455e+19,
    "train_batch_size": 16,
    "trial_name": null,
    "trial_params": null