hojzas committed on
Commit
1e1c07b
1 Parent(s): a5bb337

Add SetFit model

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
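
This pooling configuration selects mean-token pooling over the 768-dimensional MPNet token embeddings (CLS, max, and sqrt-length pooling are disabled). As a minimal sketch of what mean pooling computes, assuming PyTorch tensors shaped like a transformer's output:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over non-padding positions."""
    # token_embeddings: (batch, seq_len, 768); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).float()    # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # sum only over real tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)       # avoid division by zero
    return summed / counts                         # (batch, 768) sentence embedding
```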
README.md ADDED
@@ -0,0 +1,243 @@
+ ---
+ library_name: setfit
+ tags:
+ - setfit
+ - sentence-transformers
+ - text-classification
+ - generated_from_setfit_trainer
+ datasets:
+ - hojzas/proj8-lab1
+ metrics:
+ - accuracy
+ widget:
+ - text: "def first_with_given_key(iterable, key=repr):\n res = []\n keys = set()\n\
+     \ for item in iterable:\n if key(item) not in keys:\n keys.add(key(item))\n\
+     \ return res"
+ - text: "def first_with_given_key(iterable, key=repr):\n\tget_key = get_key_l(key)\n\
+     \tused_keys = []\n\tfor item in iterable:\n\t\tkey_item = get_key(item)\n\t\t\t\
+     \n\t\tif key_item in used_keys:\n\t\t\tcontinue\n\t\t\n\t\ttry:\n\t\t\tused_keys.append(hash(key_item))\n\
+     \t\texcept TypeError:\n\t\t\tused_keys.apppend(repr(key_item))\n\t\t\t\n\t\tyield\
+     \ item"
+ - text: "def first_with_given_key(iterable, key=repr):\n set_of_keys = set()\n\
+     \ key_lambda = _get_lambda(key)\n for item in iterable:\n key = key_lambda(item)\n\
+     \ try:\n key_to_set = hash(key)\n except TypeError:\n\
+     \ key_to_set = repr(key)\n\n if key_to_set in set_of_keys:\n\
+     \ continue\n set_of_keys.add(key_to_set)\n yield item"
+ - text: "def first_with_given_key(iterable, key=lambda y: y):\n result = list()\n\
+     \ func_it = iter(iterable)\n while True:\n try:\n value\
+     \ = next(func_it)\n if key(value) not in result:\n yield\
+     \ value\n result.insert(-1, key(value))\n except StopIteration:\n\
+     \ break"
+ - text: "def first_with_given_key(iterable, key=repr):\n used_keys = {}\n get_key\
+     \ = return_key(key)\n for item in iterable:\n item_key = get_key(item)\n\
+     \ if item_key in used_keys.keys():\n continue\n try:\n\
+     \ used_keys[hash(item_key)] = repr(item)\n except TypeError:\n\
+     \ used_keys[repr(item_key)] = repr(item)\n yield item"
+ pipeline_tag: text-classification
+ inference: true
+ co2_eq_emissions:
+   emissions: 2.0314927247192536
+   source: codecarbon
+   training_type: fine-tuning
+   on_cloud: false
+   cpu_model: Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz
+   ram_total_size: 251.49161911010742
+   hours_used: 0.006
+   hardware_used: 4 x NVIDIA RTX A5000
+ base_model: sentence-transformers/all-mpnet-base-v2
+ model-index:
+ - name: SetFit with sentence-transformers/all-mpnet-base-v2
+   results:
+   - task:
+       type: text-classification
+       name: Text Classification
+     dataset:
+       name: hojzas/proj8-lab1
+       type: hojzas/proj8-lab1
+       split: test
+     metrics:
+     - type: accuracy
+       value: 0.9722222222222222
+       name: Accuracy
+ ---
+
+ # SetFit with sentence-transformers/all-mpnet-base-v2
+
+ This is a [SetFit](https://github.com/huggingface/setfit) model trained on the [hojzas/proj8-lab1](https://huggingface.co/datasets/hojzas/proj8-lab1) dataset that can be used for Text Classification. This SetFit model uses [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.
+
+ The model has been trained using an efficient few-shot learning technique that involves two steps (a minimal training sketch follows the list):
+
+ 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+ 2. Training a classification head with features from the fine-tuned Sentence Transformer.
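+
+ As a concrete illustration of these two steps, here is a minimal sketch using the `setfit` `Trainer` API. It is hedged: the dataset column names and the `train` split are assumptions, not taken from this repository.
+
+ ```python
+ from datasets import load_dataset
+ from setfit import SetFitModel, Trainer, TrainingArguments
+
+ # Assumption: the dataset exposes "text"/"label" columns and a "train" split.
+ train_ds = load_dataset("hojzas/proj8-lab1", split="train")
+
+ model = SetFitModel.from_pretrained("sentence-transformers/all-mpnet-base-v2")
+ args = TrainingArguments(batch_size=16, num_epochs=1)
+
+ trainer = Trainer(model=model, args=args, train_dataset=train_ds)
+ trainer.train()  # runs both steps: contrastive fine-tuning, then fitting the head
+ ```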
+
+ ## Model Details
+
+ ### Model Description
+ - **Model Type:** SetFit
+ - **Sentence Transformer body:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2)
+ - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
+ - **Maximum Sequence Length:** 384 tokens
+ - **Number of Classes:** 2 classes
+ - **Training Dataset:** [hojzas/proj8-lab1](https://huggingface.co/datasets/hojzas/proj8-lab1)
+ <!-- - **Language:** Unknown -->
+ <!-- - **License:** Unknown -->
+
+ ### Model Sources
+
+ - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
+ - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
+ - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)
+
+ ### Model Labels
+ | Label | Examples |
+ |:------|:---------|
+ | 0 | <ul><li>'def first_with_given_key(iterable, key=lambda x: x):\\n keys_in_list = []\\n for it in iterable:\\n if key(it) not in keys_in_list:\\n keys_in_list.append(key(it))\\n yield it'</li><li>'def first_with_given_key(iterable, key=lambda value: value):\\n it = iter(iterable)\\n saved_keys = []\\n while True:\\n try:\\n value = next(it)\\n if key(value) not in saved_keys:\\n saved_keys.append(key(value))\\n yield value\\n except StopIteration:\\n break'</li><li>'def first_with_given_key(iterable, key=None):\\n if key is None:\\n key = lambda x: x\\n item_list = []\\n key_set = set()\\n for item in iterable:\\n generated_item = key(item)\\n if generated_item not in item_list:\\n item_list.append(generated_item)\\n yield item'</li></ul> |
+ | 1 | <ul><li>'def first_with_given_key(lst, key = lambda x: x):\\n res = set()\\n for i in lst:\\n if repr(key(i)) not in res:\\n res.add(repr(key(i)))\\n yield i'</li><li>'def first_with_given_key(iterable, key=repr):\\n set_of_keys = set()\\n lambda_key = (lambda x: key(x))\\n for item in iterable:\\n key = lambda_key(item)\\n try:\\n key_for_set = hash(key)\\n except TypeError:\\n key_for_set = repr(key)\\n if key_for_set in set_of_keys:\\n continue\\n set_of_keys.add(key_for_set)\\n yield item'</li><li>'def first_with_given_key(iterable, key=None):\\n if key is None:\\n key = identity\\n appeared_keys = set()\\n for item in iterable:\\n generated_key = key(item)\\n if not generated_key.__hash__:\\n generated_key = repr(generated_key)\\n if generated_key not in appeared_keys:\\n appeared_keys.add(generated_key)\\n yield item'</li></ul> |
+
+ ## Evaluation
+
+ ### Metrics
+ | Label | Accuracy |
+ |:--------|:---------|
+ | **all** | 0.9722 |
+
+ ## Uses
+
+ ### Direct Use for Inference
+
+ First install the SetFit library:
+
+ ```bash
+ pip install setfit
+ ```
+
+ Then you can load this model and run inference.
+
+ ```python
+ from setfit import SetFitModel
+
+ # Download from the 🤗 Hub
+ model = SetFitModel.from_pretrained("hojzas/proj8-lab1")
+ # Run inference
+ preds = model("""def first_with_given_key(iterable, key=repr):
+     res = []
+     keys = set()
+     for item in iterable:
+         if key(item) not in keys:
+             keys.add(key(item))
+     return res""")
+ ```
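+
+ The model callable also accepts a batch of inputs; a small hedged example (the snippets are illustrative, not taken from the dataset):
+
+ ```python
+ preds = model([
+     "def f(xs): return [x for x in xs]",
+     "def g(xs): return {repr(x) for x in xs}",
+ ])
+ print(preds)  # one predicted label (0 or 1) per input snippet
+ ```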
+
+ <!--
+ ### Downstream Use
+
+ *List how someone could finetune this model on their own dataset.*
+ -->
+
+ <!--
+ ### Out-of-Scope Use
+
+ *List how the model may foreseeably be misused and address what users ought not to do with the model.*
+ -->
+
+ <!--
+ ## Bias, Risks and Limitations
+
+ *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+ -->
+
+ <!--
+ ### Recommendations
+
+ *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+ -->
+
+ ## Training Details
+
+ ### Training Set Metrics
+ | Training set | Min | Median | Max |
+ |:-------------|:----|:--------|:----|
+ | Word count | 43 | 91.6071 | 125 |
+
+ | Label | Training Sample Count |
+ |:------|:----------------------|
+ | 0 | 20 |
+ | 1 | 8 |
+
+ ### Training Hyperparameters
+ The values below correspond to `setfit.TrainingArguments` fields; a reconstruction in code follows the list.
+ - batch_size: (16, 16)
+ - num_epochs: (1, 1)
+ - max_steps: -1
+ - sampling_strategy: oversampling
+ - num_iterations: 20
+ - body_learning_rate: (2e-05, 2e-05)
+ - head_learning_rate: 2e-05
+ - loss: CosineSimilarityLoss
+ - distance_metric: cosine_distance
+ - margin: 0.25
+ - end_to_end: False
+ - use_amp: False
+ - warmup_proportion: 0.1
+ - seed: 42
+ - eval_max_steps: -1
+ - load_best_model_at_end: False
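+
+ A hedged reconstruction of the arguments listed above (the loss and distance metric are left at their defaults, which match the listed values):
+
+ ```python
+ from setfit import TrainingArguments
+
+ args = TrainingArguments(
+     batch_size=(16, 16),                # (embedding phase, classifier phase)
+     num_epochs=(1, 1),
+     max_steps=-1,
+     sampling_strategy="oversampling",
+     num_iterations=20,
+     body_learning_rate=(2e-05, 2e-05),
+     head_learning_rate=2e-05,
+     margin=0.25,
+     end_to_end=False,
+     use_amp=False,
+     warmup_proportion=0.1,
+     seed=42,
+     eval_max_steps=-1,
+     load_best_model_at_end=False,
+ )
+ ```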
+
+ ### Training Results
+ | Epoch | Step | Training Loss | Validation Loss |
+ |:------:|:----:|:-------------:|:---------------:|
+ | 0.0143 | 1 | 0.4043 | - |
+ | 0.7143 | 50 | 0.0022 | - |
+
+ ### Environmental Impact
+ Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon).
+ - **Carbon Emitted**: 0.002 kg of CO2
+ - **Hours Used**: 0.006 hours
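+
+ For reference, a minimal sketch of how such a measurement is typically taken with CodeCarbon (the tracked training call is a placeholder, not this repository's actual script):
+
+ ```python
+ from codecarbon import EmissionsTracker
+
+ tracker = EmissionsTracker()
+ tracker.start()
+ # ... run training here (placeholder) ...
+ emissions_kg = tracker.stop()  # returns emissions in kg of CO2-eq
+ print(f"Emitted {emissions_kg:.3f} kg of CO2-eq")
+ ```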
+
+ ### Training Hardware
+ - **On Cloud**: No
+ - **GPU Model**: 4 x NVIDIA RTX A5000
+ - **CPU Model**: Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz
+ - **RAM Size**: 251.49 GB
+
+ ### Framework Versions
+ - Python: 3.10.12
+ - SetFit: 1.0.3
+ - Sentence Transformers: 2.2.2
+ - Transformers: 4.36.1
+ - PyTorch: 2.1.2+cu121
+ - Datasets: 2.14.7
+ - Tokenizers: 0.15.1
+
+ ## Citation
+
+ ### BibTeX
+ ```bibtex
+ @article{https://doi.org/10.48550/arxiv.2209.11055,
+     doi = {10.48550/ARXIV.2209.11055},
+     url = {https://arxiv.org/abs/2209.11055},
+     author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+     keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+     title = {Efficient Few-Shot Learning Without Prompts},
+     publisher = {arXiv},
+     year = {2022},
+     copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
+
+ <!--
+ ## Glossary
+
+ *Clearly define terms in order to be accessible across audiences.*
+ -->
+
+ <!--
+ ## Model Card Authors
+
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+ -->
+
+ <!--
+ ## Model Card Contact
+
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+ -->
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_name_or_path": "/home/xkrejc70/.cache/torch/sentence_transformers/sentence-transformers_all-mpnet-base-v2/",
+   "architectures": [
+     "MPNetModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "mpnet",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "relative_attention_num_buckets": 32,
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.1",
+   "vocab_size": 30527
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.0.0",
+     "transformers": "4.6.1",
+     "pytorch": "1.8.1"
+   }
+ }
config_setfit.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "labels": null,
+   "normalize_embeddings": false
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77280e5233e1d778bd1ade31d8dc774b4ef4acfb32fa6cb94e2701cea527b0d8
+ size 437967672
model_head.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9a0e85078746f6fe14fdb0f00553205cb2e9e8221580681c177552caa7e692e
+ size 7007
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 384,
+   "do_lower_case": false
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "104": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "30526": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "do_lower_case": true,
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "max_length": 128,
+   "model_max_length": 512,
+   "pad_to_multiple_of": null,
+   "pad_token": "<pad>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "</s>",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "MPNetTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff