system HF staff committed on
Commit
84a3bfd
1 Parent(s): a5abd64

Commit From AutoTrain

Browse files
.gitattributes CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
36
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
37
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - autotrain
4
+ - text-classification
5
+ language:
6
+ - unk
7
+ widget:
8
+ - text: "I love AutoTrain 🤗"
9
+ datasets:
10
+ - ram119900/autotrain-data-tax_issues
11
+ co2_eq_emissions:
12
+ emissions: 0.044102887824261174
13
+ ---
14
+
15
+ # Model Trained Using AutoTrain
16
+
17
+ - Problem type: Multi-class Classification
18
+ - Model ID: 3708498778
19
+ - CO2 Emissions (in grams): 0.0441
20
+
21
+ ## Validation Metrics
22
+
23
+ - Loss: 0.111
24
+ - Accuracy: 0.968
25
+ - Macro F1: 0.967
26
+ - Micro F1: 0.968
27
+ - Weighted F1: 0.969
28
+ - Macro Precision: 0.968
29
+ - Micro Precision: 0.968
30
+ - Weighted Precision: 0.971
31
+ - Macro Recall: 0.968
32
+ - Micro Recall: 0.968
33
+ - Weighted Recall: 0.968
34
+
35
+
36
+ ## Usage
37
+
38
+ You can use cURL to access this model:
39
+
40
+ ```
41
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/ram119900/autotrain-tax_issues-3708498778
42
+ ```
43
+
44
+ Or Python API:
45
+
46
+ ```
47
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
48
+
49
+ model = AutoModelForSequenceClassification.from_pretrained("ram119900/autotrain-tax_issues-3708498778", use_auth_token=True)
50
+
51
+ tokenizer = AutoTokenizer.from_pretrained("ram119900/autotrain-tax_issues-3708498778", use_auth_token=True)
52
+
53
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
54
+
55
+ outputs = model(**inputs)
56
+ ```
config.json ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "AutoTrain",
3
+ "_num_labels": 50,
4
+ "architectures": [
5
+ "XLMRobertaForSequenceClassification"
6
+ ],
7
+ "attention_probs_dropout_prob": 0.1,
8
+ "bos_token_id": 0,
9
+ "classifier_dropout": null,
10
+ "eos_token_id": 2,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 1024,
14
+ "id2label": {
15
+ "0": "Question1",
16
+ "1": "Question10",
17
+ "2": "Question11",
18
+ "3": "Question12",
19
+ "4": "Question13",
20
+ "5": "Question14",
21
+ "6": "Question15",
22
+ "7": "Question16",
23
+ "8": "Question17",
24
+ "9": "Question18",
25
+ "10": "Question19",
26
+ "11": "Question2",
27
+ "12": "Question20",
28
+ "13": "Question21",
29
+ "14": "Question22",
30
+ "15": "Question23",
31
+ "16": "Question24",
32
+ "17": "Question25",
33
+ "18": "Question26",
34
+ "19": "Question27",
35
+ "20": "Question28",
36
+ "21": "Question29",
37
+ "22": "Question3",
38
+ "23": "Question30",
39
+ "24": "Question31",
40
+ "25": "Question32",
41
+ "26": "Question33",
42
+ "27": "Question34",
43
+ "28": "Question35",
44
+ "29": "Question36",
45
+ "30": "Question37",
46
+ "31": "Question38",
47
+ "32": "Question39",
48
+ "33": "Question4",
49
+ "34": "Question40",
50
+ "35": "Question41",
51
+ "36": "Question42",
52
+ "37": "Question43",
53
+ "38": "Question44",
54
+ "39": "Question45",
55
+ "40": "Question46",
56
+ "41": "Question47",
57
+ "42": "Question49",
58
+ "43": "Question5",
59
+ "44": "Question50",
60
+ "45": "Question6",
61
+ "46": "Question7",
62
+ "47": "Question8",
63
+ "48": "Question9",
64
+ "49": "question48"
65
+ },
66
+ "initializer_range": 0.02,
67
+ "intermediate_size": 4096,
68
+ "label2id": {
69
+ "Question1": 0,
70
+ "Question10": 1,
71
+ "Question11": 2,
72
+ "Question12": 3,
73
+ "Question13": 4,
74
+ "Question14": 5,
75
+ "Question15": 6,
76
+ "Question16": 7,
77
+ "Question17": 8,
78
+ "Question18": 9,
79
+ "Question19": 10,
80
+ "Question2": 11,
81
+ "Question20": 12,
82
+ "Question21": 13,
83
+ "Question22": 14,
84
+ "Question23": 15,
85
+ "Question24": 16,
86
+ "Question25": 17,
87
+ "Question26": 18,
88
+ "Question27": 19,
89
+ "Question28": 20,
90
+ "Question29": 21,
91
+ "Question3": 22,
92
+ "Question30": 23,
93
+ "Question31": 24,
94
+ "Question32": 25,
95
+ "Question33": 26,
96
+ "Question34": 27,
97
+ "Question35": 28,
98
+ "Question36": 29,
99
+ "Question37": 30,
100
+ "Question38": 31,
101
+ "Question39": 32,
102
+ "Question4": 33,
103
+ "Question40": 34,
104
+ "Question41": 35,
105
+ "Question42": 36,
106
+ "Question43": 37,
107
+ "Question44": 38,
108
+ "Question45": 39,
109
+ "Question46": 40,
110
+ "Question47": 41,
111
+ "Question49": 42,
112
+ "Question5": 43,
113
+ "Question50": 44,
114
+ "Question6": 45,
115
+ "Question7": 46,
116
+ "Question8": 47,
117
+ "Question9": 48,
118
+ "question48": 49
119
+ },
120
+ "layer_norm_eps": 1e-05,
121
+ "max_length": 64,
122
+ "max_position_embeddings": 514,
123
+ "model_type": "xlm-roberta",
124
+ "num_attention_heads": 16,
125
+ "num_hidden_layers": 24,
126
+ "output_past": true,
127
+ "pad_token_id": 1,
128
+ "padding": "max_length",
129
+ "position_embedding_type": "absolute",
130
+ "problem_type": "single_label_classification",
131
+ "torch_dtype": "float32",
132
+ "transformers_version": "4.25.1",
133
+ "type_vocab_size": 1,
134
+ "use_cache": true,
135
+ "vocab_size": 250002
136
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b2c64ece2711e74c3765957d0ad8addb9f54801c2fe0799b66f88f910d47712
3
+ size 2239906741
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45cf3a69915be3571b5e092b2d82d39428d9abff4bd0c79a516e36149045949a
3
+ size 17082923
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "__type": "AddedToken",
7
+ "content": "<mask>",
8
+ "lstrip": true,
9
+ "normalized": true,
10
+ "rstrip": false,
11
+ "single_word": false
12
+ },
13
+ "model_max_length": 512,
14
+ "name_or_path": "AutoTrain",
15
+ "pad_token": "<pad>",
16
+ "sep_token": "</s>",
17
+ "special_tokens_map_file": null,
18
+ "tokenizer_class": "XLMRobertaTokenizer",
19
+ "unk_token": "<unk>"
20
+ }