lim1202 committed on
Commit f7dc040
1 Parent(s): 283c636

🔧 fix dataset

README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: beans
+      name: customize
       type: imagefolder
       config: default
       split: validation
@@ -31,9 +31,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # vit-base-id-card
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the customize dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0611
+- Loss: 0.0594
 - Accuracy: 1.0
 
 ## Model description
@@ -60,7 +60,6 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - num_epochs: 4
-- mixed_precision_training: Native AMP
 
 ### Training results
 
@@ -69,6 +68,6 @@ The following hyperparameters were used during training:
 ### Framework versions
 
 - Transformers 4.38.2
-- Pytorch 2.2.1+cu121
+- Pytorch 2.2.1
 - Datasets 2.18.0
 - Tokenizers 0.15.2
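The README change repoints the model-card metadata from the `beans` dataset to a custom `imagefolder` dataset named "customize". As a rough sketch of how such a dataset is usually loaded and how this checkpoint could be queried, assuming a placeholder data directory `./id_card_images` and the repo id `lim1202/vit-base-id-card` (neither is confirmed by the diff):

```python
from datasets import load_dataset
from transformers import pipeline

# Load a custom image-classification dataset with the "imagefolder" builder
# referenced in the updated model card. "./id_card_images" is a placeholder
# path (not part of the commit); the builder infers splits from train/,
# validation/ and test/ subdirectories when they exist.
dataset = load_dataset("imagefolder", data_dir="./id_card_images")

# Query the fine-tuned checkpoint on one validation image. The repo id is an
# assumption based on the card title, not something the diff confirms.
classifier = pipeline("image-classification", model="lim1202/vit-base-id-card")
print(classifier(dataset["validation"][0]["image"]))
```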
all_results.json CHANGED
@@ -1,13 +1,12 @@
 {
     "epoch": 4.0,
     "eval_accuracy": 1.0,
-    "eval_loss": 0.0610995814204216,
-    "eval_runtime": 1.8578,
-    "eval_samples_per_second": 14.533,
-    "eval_steps_per_second": 2.153,
-    "total_flos": 3.502669323615437e+16,
-    "train_loss": 0.25591564178466797,
-    "train_runtime": 40.8688,
-    "train_samples_per_second": 11.06,
-    "train_steps_per_second": 0.783
+    "eval_loss": 0.05941523611545563,
+    "eval_runtime": 2.2052,
+    "eval_samples_per_second": 12.244,
+    "eval_steps_per_second": 1.814,
+    "train_loss": 0.25243260501883924,
+    "train_runtime": 85.4792,
+    "train_samples_per_second": 5.288,
+    "train_steps_per_second": 0.374
 }
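The updated metrics are internally consistent with the step count logged in trainer_state.json further down; a quick arithmetic check (the sample counts are inferred from the throughput figures, not stated anywhere in the commit):

```python
# Cross-check the new all_results.json figures against each other.
train_runtime = 85.4792            # seconds
train_steps_per_second = 0.374
train_samples_per_second = 5.288
eval_runtime = 2.2052
eval_samples_per_second = 12.244
num_epochs = 4

print(round(train_runtime * train_steps_per_second))                 # ~32 optimizer steps (matches trainer_state.json)
print(round(train_runtime * train_samples_per_second / num_epochs))  # ~113 training images per epoch (inferred)
print(round(eval_runtime * eval_samples_per_second))                 # ~27 evaluation images (inferred)
```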
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
     "eval_accuracy": 1.0,
-    "eval_loss": 0.0610995814204216,
-    "eval_runtime": 1.8578,
-    "eval_samples_per_second": 14.533,
-    "eval_steps_per_second": 2.153
+    "eval_loss": 0.05941523611545563,
+    "eval_runtime": 2.2052,
+    "eval_samples_per_second": 12.244,
+    "eval_steps_per_second": 1.814
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17fddba4b9b119ad3ef2426885782d2b79033eb714640f3d62da164f57c27206
+oid sha256:bff36a0971def28059fa5de42fe3d66e08a9c955e842c930f175761510dbb14f
 size 343227052
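model.safetensors is stored through Git LFS, so the diff only touches the pointer file; the `oid sha256:` value is the checksum of the actual weights. A small check that a locally downloaded copy matches the new pointer (the local filename is assumed):

```python
import hashlib

# sha256 taken from the updated LFS pointer above.
EXPECTED = "bff36a0971def28059fa5de42fe3d66e08a9c955e842c930f175761510dbb14f"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:           # local download, assumed path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print(h.hexdigest() == EXPECTED)  # True if the file matches this commit
```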
preprocessor_config.json CHANGED
@@ -7,7 +7,7 @@
     0.5,
     0.5
   ],
-  "image_processor_type": "ViTFeatureExtractor",
+  "image_processor_type": "ViTImageProcessor",
   "image_std": [
     0.5,
     0.5,
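The only change here renames the processor class from the legacy ViTFeatureExtractor to ViTImageProcessor, the name the current image-processor API expects. A minimal sketch of loading it through AutoImageProcessor, which dispatches on this `image_processor_type` field; the repo id and image path are placeholders, not taken from the commit:

```python
from PIL import Image
from transformers import AutoImageProcessor

# AutoImageProcessor instantiates the class named by "image_processor_type"
# in preprocessor_config.json, so the field must name a current class such as
# ViTImageProcessor rather than the deprecated ViTFeatureExtractor.
processor = AutoImageProcessor.from_pretrained("lim1202/vit-base-id-card")

image = Image.open("example_id_card.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) for this ViT config
```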
train_results.json CHANGED
@@ -1,8 +1,7 @@
 {
     "epoch": 4.0,
-    "total_flos": 3.502669323615437e+16,
-    "train_loss": 0.25591564178466797,
-    "train_runtime": 40.8688,
-    "train_samples_per_second": 11.06,
-    "train_steps_per_second": 0.783
+    "train_loss": 0.25243260501883924,
+    "train_runtime": 85.4792,
+    "train_samples_per_second": 5.288,
+    "train_steps_per_second": 0.374
 }
trainer_state.json CHANGED
@@ -10,33 +10,33 @@
   "log_history": [
     {
       "epoch": 1.25,
-      "grad_norm": 1.040573000907898,
-      "learning_rate": 0.00014375,
-      "loss": 0.6083,
+      "grad_norm": 0.9073283672332764,
+      "learning_rate": 0.0001375,
+      "loss": 0.6222,
       "step": 10
     },
     {
       "epoch": 2.5,
-      "grad_norm": 0.4061480462551117,
-      "learning_rate": 8.125000000000001e-05,
-      "loss": 0.1341,
+      "grad_norm": 0.34812086820602417,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 0.1155,
       "step": 20
     },
     {
       "epoch": 3.75,
-      "grad_norm": 0.30543988943099976,
-      "learning_rate": 1.8750000000000002e-05,
-      "loss": 0.0648,
+      "grad_norm": 0.27614542841911316,
+      "learning_rate": 1.25e-05,
+      "loss": 0.0591,
       "step": 30
     },
     {
       "epoch": 4.0,
       "step": 32,
       "total_flos": 3.502669323615437e+16,
-      "train_loss": 0.25591564178466797,
-      "train_runtime": 40.8688,
-      "train_samples_per_second": 11.06,
-      "train_steps_per_second": 0.783
+      "train_loss": 0.25243260501883924,
+      "train_runtime": 85.4792,
+      "train_samples_per_second": 5.288,
+      "train_steps_per_second": 0.374
     }
   ],
   "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef34432b313669942c77076b60d487bda6024f60f7696c7295a2118b4ca0d8b5
-size 4856
+oid sha256:b30cbce2611a7d439fc79c26cb16d49c03b291372677674fc44afb3d2c9498cd
+size 4920