aisuko committed on
Commit 60f5d54
1 Parent(s): b2ece65

ft-vit-base-patch16-224-in21k-on-food101-lora

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ datasets:
+ - food101
+ metrics:
+ - accuracy
+ base_model: google/vit-base-patch16-224-in21k
+ model-index:
+ - name: ft-vit-base-patch16-224-in21k-on-food101-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # ft-vit-base-patch16-224-in21k-on-food101-lora
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2242
+ - Accuracy: 0.932
+
+ ## Model description
+
+ A LoRA adapter for ViT-Base (patch 16, 224x224 input, ImageNet-21k pre-training), fine-tuned for 101-class food image classification. Per `adapter_config.json`, rank-16 low-rank updates are injected into the attention query and value projections, and a new classification head is trained alongside them; the resulting adapter (`adapter_model.safetensors`) is only about 2.7 MB.
+
+ ## Intended uses & limitations
+
+ Intended for classifying food photographs into the 101 food101 categories; a minimal usage sketch follows. The card reports no evaluation outside food101, so behaviour on other image domains is unknown.
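A minimal inference sketch with `peft`, assuming the adapter is published at `aisuko/ft-vit-base-patch16-224-in21k-on-food101-lora` (the repo this commit targets); the image path is a placeholder:

```python
import torch
from PIL import Image
from peft import PeftModel
from transformers import ViTForImageClassification, ViTImageProcessor

# Repo id assumed from this commit; adjust if the adapter lives elsewhere.
adapter_id = "aisuko/ft-vit-base-patch16-224-in21k-on-food101-lora"

# The base checkpoint ships without a classification head, so a fresh
# 101-class head is created here; PeftModel then restores the fine-tuned
# head stored in the adapter (modules_to_save=["classifier"]).
base = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=101
)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()

processor = ViTImageProcessor.from_pretrained(adapter_id)
image = Image.open("food_photo.jpg")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())  # index into food101's 101 labels
```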
+
+ ## Training and evaluation data
+
+ Fine-tuning and evaluation used the [food101](https://huggingface.co/datasets/food101) dataset (101 food categories). The exact split sizes are not recorded in this card; the 17 steps per epoch in the results table below, at an effective batch size of 256, imply a training subset of roughly 4,400 images.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.005
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 256
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 2
+ - mixed_precision_training: Native AMP
+
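The training script itself is not part of this commit; below is a `TrainingArguments` sketch consistent with the list above. Argument names follow the transformers 4.36 API, and `output_dir` and `evaluation_strategy` are assumptions:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="ft-vit-base-patch16-224-in21k-on-food101-lora",  # assumed
    learning_rate=5e-3,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=4,  # 64 * 4 = 256 total train batch size
    num_train_epochs=2,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # "Native AMP" mixed precision
    evaluation_strategy="epoch",  # assumed; results below are logged per epoch
)
```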
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 2.2628 | 0.96 | 17 | 0.3024 | 0.912 |
+ | 0.2236 | 1.92 | 34 | 0.2242 | 0.932 |
+
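The accuracy column is plausibly produced by a standard `compute_metrics` hook passed to the `Trainer`; a sketch using the `evaluate` library, again an assumption since the script is not included:

```python
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    # eval_pred.predictions holds logits of shape (num_examples, 101)
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return accuracy.compute(predictions=predictions,
                            references=eval_pred.label_ids)
```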
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.36.2
+ - Pytorch 2.0.0
+ - Datasets 2.1.0
+ - Tokenizers 0.15.0
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "ViTForImageClassification",
+     "parent_library": "transformers.models.vit.modeling_vit"
+   },
+   "base_model_name_or_path": "google/vit-base-patch16-224-in21k",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "classifier"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "query",
+     "value"
+   ],
+   "task_type": null
+ }
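The fields above map directly onto a `peft` `LoraConfig`; a sketch of how this adapter was likely constructed (`num_labels=101` follows from food101):

```python
from peft import LoraConfig, get_peft_model
from transformers import ViTForImageClassification

base = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=101
)
config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["query", "value"],  # LoRA on the attention q/v projections
    modules_to_save=["classifier"],     # train and save the new head in full
)
model = get_peft_model(base, config)
model.print_trainable_parameters()  # a small fraction of the ~86M base parameters
```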
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:083c615b53de45de563683e5582cbeaef4cbb7ab5c00d3b9aac11f1c0be5ac96
+ size 2677140
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
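With `image_mean = image_std = 0.5` and `rescale_factor = 1/255`, pixels are mapped from `[0, 255]` to roughly `[-1, 1]`, and images are resized to 224x224 with bilinear resampling (`resample: 2`). A quick sanity check, with the repo id assumed as above:

```python
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained(
    "aisuko/ft-vit-base-patch16-224-in21k-on-food101-lora"  # assumed repo id
)
# Stand-in image; any RGB image works.
image = Image.fromarray(np.random.randint(0, 256, (384, 512, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224): resized per "size"
# Normalization with mean/std 0.5 keeps values within [-1, 1].
print(inputs["pixel_values"].min() >= -1.0, inputs["pixel_values"].max() <= 1.0)
```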
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:498ebecf657d8f13ec50f8bca79cadf7ca460820bc2c5bae01cb2cd841c578e8
+ size 4411