Commit 2fe19dd by abhishek
Parent(s): c4449ac

Commit From AutoNLP
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
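Note: the two new patterns route the tarball and pickle artifacts added in this commit through Git LFS, so the repository itself stores small pointer files while the binaries live in LFS storage. A minimal sketch of fetching those LFS-backed files, assuming the `huggingface_hub` client (which is not referenced anywhere in this commit):

```
from huggingface_hub import hf_hub_download

# Downloading through the Hub resolves the LFS pointers shown further
# down in this diff to the actual binary content.
model_path = hf_hub_download(
    repo_id="Smone55/autonlp-au_topics-452311620",
    filename="pytorch_model.bin",
)
sample_path = hf_hub_download(
    repo_id="Smone55/autonlp-au_topics-452311620",
    filename="sample_input.pkl",
)
print(model_path, sample_path)
```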
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ tags: autonlp
+ language: en
+ widget:
+ - text: "I love AutoNLP 🤗"
+ datasets:
+ - Smone55/autonlp-data-au_topics
+ co2_eq_emissions: 208.0823957145878
+ ---
+
+ # Model Trained Using AutoNLP
+
+ - Problem type: Multi-class Classification
+ - Model ID: 452311620
+ - CO2 Emissions (in grams): 208.0823957145878
+
+ ## Validation Metrics
+
+ - Loss: 0.5259971022605896
+ - Accuracy: 0.8767479025169796
+ - Macro F1: 0.8618813750734912
+ - Micro F1: 0.8767479025169796
+ - Weighted F1: 0.8742964006840133
+ - Macro Precision: 0.8627700506991158
+ - Micro Precision: 0.8767479025169796
+ - Weighted Precision: 0.8755603985289852
+ - Macro Recall: 0.8662183006750934
+ - Micro Recall: 0.8767479025169796
+ - Weighted Recall: 0.8767479025169796
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/Smone55/autonlp-au_topics-452311620
+ ```
+
+ Or Python API:
+
+ ```
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("Smone55/autonlp-au_topics-452311620", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("Smone55/autonlp-au_topics-452311620", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoNLP", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
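The committed README stops at the raw forward pass. As a hedged follow-up (not part of the committed file), the logits can be mapped back to one of the 127 class ids via the `id2label` table from `config.json` below:

```
import torch

# Continues the README snippet above: outputs.logits has shape (1, 127).
probs = torch.softmax(outputs.logits, dim=-1)
pred = probs.argmax(dim=-1).item()
print(model.config.id2label[pred], probs[0, pred].item())
```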
config.json ADDED
@@ -0,0 +1,286 @@
+ {
+   "_name_or_path": "AutoNLP",
+   "_num_labels": 127,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "-1",
+     "1": "0",
+     "2": "1",
+     "3": "10",
+     "4": "100",
+     "5": "101",
+     "6": "102",
+     "7": "103",
+     "8": "104",
+     "9": "105",
+     "10": "106",
+     "11": "107",
+     "12": "108",
+     "13": "109",
+     "14": "11",
+     "15": "110",
+     "16": "111",
+     "17": "112",
+     "18": "113",
+     "19": "114",
+     "20": "115",
+     "21": "116",
+     "22": "117",
+     "23": "118",
+     "24": "119",
+     "25": "12",
+     "26": "120",
+     "27": "121",
+     "28": "122",
+     "29": "123",
+     "30": "124",
+     "31": "125",
+     "32": "13",
+     "33": "14",
+     "34": "15",
+     "35": "16",
+     "36": "17",
+     "37": "18",
+     "38": "19",
+     "39": "2",
+     "40": "20",
+     "41": "21",
+     "42": "22",
+     "43": "23",
+     "44": "24",
+     "45": "25",
+     "46": "26",
+     "47": "27",
+     "48": "28",
+     "49": "29",
+     "50": "3",
+     "51": "30",
+     "52": "31",
+     "53": "32",
+     "54": "33",
+     "55": "34",
+     "56": "35",
+     "57": "36",
+     "58": "37",
+     "59": "38",
+     "60": "39",
+     "61": "4",
+     "62": "40",
+     "63": "41",
+     "64": "42",
+     "65": "43",
+     "66": "44",
+     "67": "45",
+     "68": "46",
+     "69": "47",
+     "70": "48",
+     "71": "49",
+     "72": "5",
+     "73": "50",
+     "74": "51",
+     "75": "52",
+     "76": "53",
+     "77": "54",
+     "78": "55",
+     "79": "56",
+     "80": "57",
+     "81": "58",
+     "82": "59",
+     "83": "6",
+     "84": "60",
+     "85": "61",
+     "86": "62",
+     "87": "63",
+     "88": "64",
+     "89": "65",
+     "90": "66",
+     "91": "67",
+     "92": "68",
+     "93": "69",
+     "94": "7",
+     "95": "70",
+     "96": "71",
+     "97": "72",
+     "98": "73",
+     "99": "74",
+     "100": "75",
+     "101": "76",
+     "102": "77",
+     "103": "78",
+     "104": "79",
+     "105": "8",
+     "106": "80",
+     "107": "81",
+     "108": "82",
+     "109": "83",
+     "110": "84",
+     "111": "85",
+     "112": "86",
+     "113": "87",
+     "114": "88",
+     "115": "89",
+     "116": "9",
+     "117": "90",
+     "118": "91",
+     "119": "92",
+     "120": "93",
+     "121": "94",
+     "122": "95",
+     "123": "96",
+     "124": "97",
+     "125": "98",
+     "126": "99"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "-1": 0,
+     "0": 1,
+     "1": 2,
+     "10": 3,
+     "100": 4,
+     "101": 5,
+     "102": 6,
+     "103": 7,
+     "104": 8,
+     "105": 9,
+     "106": 10,
+     "107": 11,
+     "108": 12,
+     "109": 13,
+     "11": 14,
+     "110": 15,
+     "111": 16,
+     "112": 17,
+     "113": 18,
+     "114": 19,
+     "115": 20,
+     "116": 21,
+     "117": 22,
+     "118": 23,
+     "119": 24,
+     "12": 25,
+     "120": 26,
+     "121": 27,
+     "122": 28,
+     "123": 29,
+     "124": 30,
+     "125": 31,
+     "13": 32,
+     "14": 33,
+     "15": 34,
+     "16": 35,
+     "17": 36,
+     "18": 37,
+     "19": 38,
+     "2": 39,
+     "20": 40,
+     "21": 41,
+     "22": 42,
+     "23": 43,
+     "24": 44,
+     "25": 45,
+     "26": 46,
+     "27": 47,
+     "28": 48,
+     "29": 49,
+     "3": 50,
+     "30": 51,
+     "31": 52,
+     "32": 53,
+     "33": 54,
+     "34": 55,
+     "35": 56,
+     "36": 57,
+     "37": 58,
+     "38": 59,
+     "39": 60,
+     "4": 61,
+     "40": 62,
+     "41": 63,
+     "42": 64,
+     "43": 65,
+     "44": 66,
+     "45": 67,
+     "46": 68,
+     "47": 69,
+     "48": 70,
+     "49": 71,
+     "5": 72,
+     "50": 73,
+     "51": 74,
+     "52": 75,
+     "53": 76,
+     "54": 77,
+     "55": 78,
+     "56": 79,
+     "57": 80,
+     "58": 81,
+     "59": 82,
+     "6": 83,
+     "60": 84,
+     "61": 85,
+     "62": 86,
+     "63": 87,
+     "64": 88,
+     "65": 89,
+     "66": 90,
+     "67": 91,
+     "68": 92,
+     "69": 93,
+     "7": 94,
+     "70": 95,
+     "71": 96,
+     "72": 97,
+     "73": 98,
+     "74": 99,
+     "75": 100,
+     "76": 101,
+     "77": 102,
+     "78": 103,
+     "79": 104,
+     "8": 105,
+     "80": 106,
+     "81": 107,
+     "82": 108,
+     "83": 109,
+     "84": 110,
+     "85": 111,
+     "86": 112,
+     "87": 113,
+     "88": 114,
+     "89": 115,
+     "9": 116,
+     "90": 117,
+     "91": 118,
+     "92": 119,
+     "93": 120,
+     "94": 121,
+     "95": 122,
+     "96": 123,
+     "97": 124,
+     "98": 125,
+     "99": 126
+   },
+   "layer_norm_eps": 1e-12,
+   "max_length": 128,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "padding": "max_length",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "transformers_version": "4.8.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
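One detail worth flagging in this config: the `id2label` indices follow a lexicographic sort of the original class ids as strings ("-1", "0", "1", "10", "100", …), which is why index 3 maps to "10" rather than "3". A small sketch that reproduces the ordering (an observation about the table above, not documented AutoNLP behaviour):

```
# Class ids -1..125 compared as strings give exactly the id2label order above.
labels = sorted(str(i) for i in range(-1, 126))
id2label = {i: lab for i, lab in enumerate(labels)}
assert id2label[0] == "-1" and id2label[3] == "10" and id2label[39] == "2"
print(len(labels))  # 127, matching "_num_labels"
```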
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:638717e289dbbf80fa8b79d1efca109e799b38ecb8e6f43d4fb1ea9c9810d011
+ size 1341249901
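What is committed here is a Git LFS pointer, not the weights: the three lines record the pointer spec version, the SHA-256 of the real file, and its size in bytes (about 1.34 GB). A hedged sketch for checking a locally downloaded copy against the pointer (the local filename is an assumption):

```
import hashlib
import os

path = "pytorch_model.bin"  # assumed local download location
expected_oid = "638717e289dbbf80fa8b79d1efca109e799b38ecb8e6f43d4fb1ea9c9810d011"

# The downloaded binary should match the pointer's size and oid exactly.
assert os.path.getsize(path) == 1341249901
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == expected_oid
```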
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8058f9f2c01adfce977028e45132917b298c2e366b35167f6c6c4629c510de76
+ size 4384
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "AutoNLP", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff