RubyZhi committed
Commit
b131316
1 Parent(s): a82313c

Training in progress, epoch 1

.gitattributes CHANGED
@@ -25,7 +25,6 @@
  *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
.ipynb_checkpoints/Untitled-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
+ {
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
.ipynb_checkpoints/pytorch_model-checkpoint.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaef78052175f481c584c4a565d03b7c231cbf0288205ad4ad4bd6ec7741b842
+ size 134
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - food101
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-base-patch16-224-in21k-finetuned-lora-food101
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: food101
+       type: food101
+       config: default
+       split: train[:5000]
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.964
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-base-patch16-224-in21k-finetuned-lora-food101
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1408
+ - Accuracy: 0.964
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.005
+ - train_batch_size: 128
+ - eval_batch_size: 128
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 512
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | No log | 1.0 | 9 | 0.5739 | 0.874 |
+ | 2.1968 | 2.0 | 18 | 0.2064 | 0.944 |
+ | 0.3323 | 3.0 | 27 | 0.1521 | 0.96 |
+ | 0.2087 | 4.0 | 36 | 0.1408 | 0.964 |
+ | 0.1678 | 5.0 | 45 | 0.1352 | 0.962 |
+
+
+ ### Framework versions
+
+ - Transformers 4.26.1
+ - Pytorch 1.13.1+cu117
+ - Datasets 2.9.0
+ - Tokenizers 0.12.1
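
For readers who want to reproduce this setup, here is a minimal sketch of how the LoRA configuration recorded in the adapter_config.json added later in this commit (r=16, lora_alpha=16, dropout 0.1, query/value targets, trainable classifier head) could be combined with the hyperparameters listed in the model card. Dataset preparation and the Trainer wiring are omitted; this is not the exact script that produced the checkpoint.

```python
from transformers import ViTForImageClassification, TrainingArguments
from peft import LoraConfig, get_peft_model

# Base checkpoint and label count taken from config.json / adapter_config.json in this commit.
base_model = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k",
    num_labels=101,
)

# LoRA settings as recorded in adapter_config.json.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["query", "value"],   # attention projections
    modules_to_save=["classifier"],      # keep the new classification head fully trainable
)
model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()       # only the adapters and the classifier are updated

# Hyperparameters from the model card above; data loading and Trainer setup omitted.
training_args = TrainingArguments(
    output_dir="vit-base-patch16-224-in21k-finetuned-lora-food101",
    learning_rate=5e-3,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    gradient_accumulation_steps=4,
    num_train_epochs=5,
    fp16=True,                           # "Native AMP" in the model card
    evaluation_strategy="epoch",
    save_strategy="epoch",
    seed=42,
)
```

`print_trainable_parameters()` makes the point of LoRA visible: only the low-rank adapter matrices and the new classifier head are trained, not the full ViT backbone.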
Untitled.ipynb ADDED
@@ -0,0 +1,135 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "552bba22-970a-438b-ac08-ffa77ca61aee",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from transformers import ViTImageProcessor, ViTModel\n",
+ "# from datasets import load_dataset\n",
+ "from PIL import Image\n",
+ "import requests"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "0645639a-b497-41af-aefa-9503da880463",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "f772ceaf98c84fecbc848f924bbe3398",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "preprocessor_config.json: 0%| | 0.00/325 [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "d58d7b601e2b4859a5f1b54892056749",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "config.json: 0%| | 0.00/5.43k [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "4dedafc3fc8a4dacb134148fa7119c84",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "pytorch_model.bin: 0%| | 0.00/346M [00:00<?, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Some weights of ViTModel were not initialized from the model checkpoint at jsacex/vit-base-patch16-224-in21k-finetuned-lora-food101 and are newly initialized: ['vit.pooler.dense.bias', 'vit.pooler.dense.weight']\n",
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# dataset = load_dataset(\"food101\")\n",
+ "# url = 'http://images.cocodataset.org/val2017/000000039769.jpg'\n",
+ "# image = Image.open(requests.get(url, stream=True).raw)\n",
+ "image = Image.open(r\"image.jpg\")\n",
+ "\n",
+ "processor = ViTImageProcessor.from_pretrained('jsacex/vit-base-patch16-224-in21k-finetuned-lora-food101')\n",
+ "model = ViTModel.from_pretrained('jsacex/vit-base-patch16-224-in21k-finetuned-lora-food101')\n",
+ "inputs = processor(images = image, return_tensors=\"pt\")\n",
+ "\n",
+ "outputs = model(**inputs)\n",
+ "last_hidden_states = outputs.last_hidden_state"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "7572729d-839d-4a9d-bc22-2a69bb8e9aaf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "torch.Size([1, 197, 768])\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(last_hidden_states.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8eb3fa73-6b0c-4a08-b084-a2bd007f2fe4",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
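
A note on the notebook above: loading the checkpoint with `ViTModel` drops the classification head, which is why the run only yields a `[1, 197, 768]` hidden-state tensor and warns that the pooler was newly initialized. Below is a sketch of getting an actual food-101 prediction instead, assuming the referenced checkpoint does carry a classification head (the warning text suggests so, but this is not verified here).

```python
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTForImageClassification

repo = "jsacex/vit-base-patch16-224-in21k-finetuned-lora-food101"  # same id the notebook uses
processor = ViTImageProcessor.from_pretrained(repo)
model = ViTForImageClassification.from_pretrained(repo)

image = Image.open("image.jpg")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits        # shape: [1, num_labels]

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])          # a food101 class name, e.g. "pizza"
```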
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": {
+ "base_model_class": "ViTForImageClassification",
+ "parent_library": "transformers.models.vit.modeling_vit"
+ },
+ "base_model_name_or_path": "google/vit-base-patch16-224-in21k",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": [
+ "classifier"
+ ],
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "value",
+ "query"
+ ],
+ "task_type": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
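
The adapter weights themselves live in adapter_model.safetensors (the next file in this commit). A minimal sketch of loading them onto the base checkpoint named above with PEFT; `ADAPTER_PATH` is a placeholder for a local clone of this repo or its Hub id, not a value taken from this page.

```python
from transformers import ViTForImageClassification
from peft import PeftModel

ADAPTER_PATH = "path/to/this/repo"  # placeholder: local checkout or Hub repo id

# Base model from base_model_name_or_path, with a head sized for food101
# so the saved "classifier" weights in the adapter file fit.
base = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k",
    num_labels=101,
)
model = PeftModel.from_pretrained(base, ADAPTER_PATH)
model.eval()  # adapter_config.json sets inference_mode: true
```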
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3fdc10c9ccf030f0416a9d34aa0a1e041eff1df526e3c8ed448186abc6db91c
+ size 2677140
config.json ADDED
@@ -0,0 +1,230 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "apple_pie",
+ "1": "baby_back_ribs",
+ "2": "baklava",
+ "3": "beef_carpaccio",
+ "4": "beef_tartare",
+ "5": "beet_salad",
+ "6": "beignets",
+ "7": "bibimbap",
+ "8": "bread_pudding",
+ "9": "breakfast_burrito",
+ "10": "bruschetta",
+ "11": "caesar_salad",
+ "12": "cannoli",
+ "13": "caprese_salad",
+ "14": "carrot_cake",
+ "15": "ceviche",
+ "16": "cheesecake",
+ "17": "cheese_plate",
+ "18": "chicken_curry",
+ "19": "chicken_quesadilla",
+ "20": "chicken_wings",
+ "21": "chocolate_cake",
+ "22": "chocolate_mousse",
+ "23": "churros",
+ "24": "clam_chowder",
+ "25": "club_sandwich",
+ "26": "crab_cakes",
+ "27": "creme_brulee",
+ "28": "croque_madame",
+ "29": "cup_cakes",
+ "30": "deviled_eggs",
+ "31": "donuts",
+ "32": "dumplings",
+ "33": "edamame",
+ "34": "eggs_benedict",
+ "35": "escargots",
+ "36": "falafel",
+ "37": "filet_mignon",
+ "38": "fish_and_chips",
+ "39": "foie_gras",
+ "40": "french_fries",
+ "41": "french_onion_soup",
+ "42": "french_toast",
+ "43": "fried_calamari",
+ "44": "fried_rice",
+ "45": "frozen_yogurt",
+ "46": "garlic_bread",
+ "47": "gnocchi",
+ "48": "greek_salad",
+ "49": "grilled_cheese_sandwich",
+ "50": "grilled_salmon",
+ "51": "guacamole",
+ "52": "gyoza",
+ "53": "hamburger",
+ "54": "hot_and_sour_soup",
+ "55": "hot_dog",
+ "56": "huevos_rancheros",
+ "57": "hummus",
+ "58": "ice_cream",
+ "59": "lasagna",
+ "60": "lobster_bisque",
+ "61": "lobster_roll_sandwich",
+ "62": "macaroni_and_cheese",
+ "63": "macarons",
+ "64": "miso_soup",
+ "65": "mussels",
+ "66": "nachos",
+ "67": "omelette",
+ "68": "onion_rings",
+ "69": "oysters",
+ "70": "pad_thai",
+ "71": "paella",
+ "72": "pancakes",
+ "73": "panna_cotta",
+ "74": "peking_duck",
+ "75": "pho",
+ "76": "pizza",
+ "77": "pork_chop",
+ "78": "poutine",
+ "79": "prime_rib",
+ "80": "pulled_pork_sandwich",
+ "81": "ramen",
+ "82": "ravioli",
+ "83": "red_velvet_cake",
+ "84": "risotto",
+ "85": "samosa",
+ "86": "sashimi",
+ "87": "scallops",
+ "88": "seaweed_salad",
+ "89": "shrimp_and_grits",
+ "90": "spaghetti_bolognese",
+ "91": "spaghetti_carbonara",
+ "92": "spring_rolls",
+ "93": "steak",
+ "94": "strawberry_shortcake",
+ "95": "sushi",
+ "96": "tacos",
+ "97": "takoyaki",
+ "98": "tiramisu",
+ "99": "tuna_tartare",
+ "100": "waffles"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "apple_pie": 0,
+ "baby_back_ribs": 1,
+ "baklava": 2,
+ "beef_carpaccio": 3,
+ "beef_tartare": 4,
+ "beet_salad": 5,
+ "beignets": 6,
+ "bibimbap": 7,
+ "bread_pudding": 8,
+ "breakfast_burrito": 9,
+ "bruschetta": 10,
+ "caesar_salad": 11,
+ "cannoli": 12,
+ "caprese_salad": 13,
+ "carrot_cake": 14,
+ "ceviche": 15,
+ "cheese_plate": 17,
+ "cheesecake": 16,
+ "chicken_curry": 18,
+ "chicken_quesadilla": 19,
+ "chicken_wings": 20,
+ "chocolate_cake": 21,
+ "chocolate_mousse": 22,
+ "churros": 23,
+ "clam_chowder": 24,
+ "club_sandwich": 25,
+ "crab_cakes": 26,
+ "creme_brulee": 27,
+ "croque_madame": 28,
+ "cup_cakes": 29,
+ "deviled_eggs": 30,
+ "donuts": 31,
+ "dumplings": 32,
+ "edamame": 33,
+ "eggs_benedict": 34,
+ "escargots": 35,
+ "falafel": 36,
+ "filet_mignon": 37,
+ "fish_and_chips": 38,
+ "foie_gras": 39,
+ "french_fries": 40,
+ "french_onion_soup": 41,
+ "french_toast": 42,
+ "fried_calamari": 43,
+ "fried_rice": 44,
+ "frozen_yogurt": 45,
+ "garlic_bread": 46,
+ "gnocchi": 47,
+ "greek_salad": 48,
+ "grilled_cheese_sandwich": 49,
+ "grilled_salmon": 50,
+ "guacamole": 51,
+ "gyoza": 52,
+ "hamburger": 53,
+ "hot_and_sour_soup": 54,
+ "hot_dog": 55,
+ "huevos_rancheros": 56,
+ "hummus": 57,
+ "ice_cream": 58,
+ "lasagna": 59,
+ "lobster_bisque": 60,
+ "lobster_roll_sandwich": 61,
+ "macaroni_and_cheese": 62,
+ "macarons": 63,
+ "miso_soup": 64,
+ "mussels": 65,
+ "nachos": 66,
+ "omelette": 67,
+ "onion_rings": 68,
+ "oysters": 69,
+ "pad_thai": 70,
+ "paella": 71,
+ "pancakes": 72,
+ "panna_cotta": 73,
+ "peking_duck": 74,
+ "pho": 75,
+ "pizza": 76,
+ "pork_chop": 77,
+ "poutine": 78,
+ "prime_rib": 79,
+ "pulled_pork_sandwich": 80,
+ "ramen": 81,
+ "ravioli": 82,
+ "red_velvet_cake": 83,
+ "risotto": 84,
+ "samosa": 85,
+ "sashimi": 86,
+ "scallops": 87,
+ "seaweed_salad": 88,
+ "shrimp_and_grits": 89,
+ "spaghetti_bolognese": 90,
+ "spaghetti_carbonara": 91,
+ "spring_rolls": 92,
+ "steak": 93,
+ "strawberry_shortcake": 94,
+ "sushi": 95,
+ "tacos": 96,
+ "takoyaki": 97,
+ "tiramisu": 98,
+ "tuna_tartare": 99,
+ "waffles": 100
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.26.1"
+ }
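
A label mapping like the one above is typically derived from the dataset's ClassLabel feature rather than written by hand. A short sketch, assuming the food101 dataset on the Hub and the train[:5000] split named in the model card:

```python
from datasets import load_dataset

ds = load_dataset("food101", split="train[:5000]")          # split listed in the model card
labels = ds.features["label"].names                         # 101 class names, alphabetical
id2label = {str(i): name for i, name in enumerate(labels)}  # same shape as config.json above
label2id = {name: i for i, name in enumerate(labels)}
```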
image.jpg ADDED
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+ "_valid_processor_keys": [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format"
+ ],
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
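
For reference, the preprocessing this config describes amounts to a resize to 224x224 with bilinear resampling (resample: 2), a rescale by 1/255, and per-channel normalization with mean = std = 0.5, so pixel values end up roughly in [-1, 1]. A sketch of the equivalent pixel math (not the library's internal implementation):

```python
import numpy as np
from PIL import Image

def preprocess(path: str) -> np.ndarray:
    # resample: 2 is PIL's BILINEAR; size: 224x224
    img = Image.open(path).convert("RGB").resize((224, 224), Image.BILINEAR)
    x = np.asarray(img).astype(np.float32) * 0.00392156862745098  # rescale_factor = 1/255
    x = (x - 0.5) / 0.5                                           # image_mean / image_std
    return x.transpose(2, 0, 1)[None]                             # -> [1, 3, 224, 224]
```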
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaef78052175f481c584c4a565d03b7c231cbf0288205ad4ad4bd6ec7741b842
+ size 134
runs/Mar26_03-38-59_node1401.palmetto.clemson.edu/events.out.tfevents.1711439362.node1401.palmetto.clemson.edu.631921.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66e8c7271d22934bf3930c87a059d9ed1c982df4873c8d8a89f023faec0f8913
+ size 9421
runs/Mar26_03-52-18_node1401.palmetto.clemson.edu/events.out.tfevents.1711439543.node1401.palmetto.clemson.edu.633326.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c73cc62f25932eabf74bc5b03a9ab4192ad04ce98b491dd9203ab0b114d9f840
+ size 9736
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e196867ed45151b00144a6b0628ea01ff70f9d31b70f35292840bb08288c4f5
+ size 4603