abhishek (HF staff) committed
Commit
3f55b78
1 Parent(s): 265f79a

Commit From AutoTrain

.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
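The two new patterns route additional file types through Git LFS; they exist precisely so that artifacts produced by AutoTrain, such as the `sample_input.pkl` added in this commit, are stored as LFS pointers rather than raw Git blobs. As a quick sanity check, a minimal Python sketch (standard library only; note `fnmatch` only approximates gitattributes glob semantics) shows which files from this commit the new rules cover:

```
from fnmatch import fnmatch

# Patterns newly tracked by Git LFS in this commit
new_patterns = ["*.tar.gz", "*.pkl"]

# Files from this commit that are stored as LFS pointers
for name in ["sample_input.pkl", "pytorch_model.bin"]:
    hits = [p for p in new_patterns if fnmatch(name, p)]
    # pytorch_model.bin is presumably matched by an earlier rule (e.g. *.bin)
    print(name, "->", hits if hits else "not covered by the new rules")
```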
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ tags: autotrain
+ language: es
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - gabitoo1234/autotrain-data-mut_all_text
+ co2_eq_emissions: 115.48848403681228
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Multi-class Classification
+ - Model ID: 680820343
+ - CO2 Emissions (in grams): 115.48848403681228
+
+ ## Validation Metrics
+
+ - Loss: 0.3041240870952606
+ - Accuracy: 0.9462770369425126
+ - Macro F1: 0.7836898686625933
+ - Micro F1: 0.9462770369425126
+ - Weighted F1: 0.9449148298990091
+ - Macro Precision: 0.8344505891491089
+ - Micro Precision: 0.9462770369425126
+ - Weighted Precision: 0.9451247372908952
+ - Macro Recall: 0.7568785255994025
+ - Micro Recall: 0.9462770369425126
+ - Weighted Recall: 0.9462770369425126
+
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/gabitoo1234/autotrain-mut_all_text-680820343
+ ```
+
+ Or Python API:
+
+ ```
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("gabitoo1234/autotrain-mut_all_text-680820343", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("gabitoo1234/autotrain-mut_all_text-680820343", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
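The committed README's Python snippet stops at the raw `outputs`; the logits still have to be mapped to one of the 57 label codes defined in `config.json` below. A minimal sketch of that last step (assuming `torch` is installed and a transformers version that still accepts `use_auth_token`):

```
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "gabitoo1234/autotrain-mut_all_text-680820343"
model = AutoModelForSequenceClassification.from_pretrained(repo, use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained(repo, use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 57)

# Softmax over the 57 classes, then map the argmax back to its label code
probs = logits.softmax(dim=-1)
idx = int(probs.argmax(dim=-1))
print(model.config.id2label[idx], float(probs[0, idx]))
```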
config.json ADDED
@@ -0,0 +1,149 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 57,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "129.0",
+     "1": "131.0",
+     "2": "142.0",
+     "3": "149.0",
+     "4": "151.0",
+     "5": "159.0",
+     "6": "191.0",
+     "7": "192.0",
+     "8": "193.0",
+     "9": "199.0",
+     "10": "212.0",
+     "11": "222.0",
+     "12": "231.0",
+     "13": "232.0",
+     "14": "234.0",
+     "15": "234.1",
+     "16": "234.2",
+     "17": "234.3",
+     "18": "234.4",
+     "19": "235.0",
+     "20": "236.0",
+     "21": "239.0",
+     "22": "240.0",
+     "23": "251.0",
+     "24": "252.0",
+     "25": "262.0",
+     "26": "313.0",
+     "27": "314.0",
+     "28": "319.0",
+     "29": "321.0",
+     "30": "325.0",
+     "31": "330.0",
+     "32": "342.0",
+     "33": "350.0",
+     "34": "361.0",
+     "35": "362.0",
+     "36": "370.0",
+     "37": "380.0",
+     "38": "390.0",
+     "39": "410.0",
+     "40": "422.0",
+     "41": "423.0",
+     "42": "424.0",
+     "43": "429.0",
+     "44": "449.0",
+     "45": "490.0",
+     "46": "511.0",
+     "47": "512.0",
+     "48": "513.0",
+     "49": "519.0",
+     "50": "521.0",
+     "51": "523.0",
+     "52": "526.0",
+     "53": "529.0",
+     "54": "539.0",
+     "55": "611.0",
+     "56": "690.0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "129.0": 0,
+     "131.0": 1,
+     "142.0": 2,
+     "149.0": 3,
+     "151.0": 4,
+     "159.0": 5,
+     "191.0": 6,
+     "192.0": 7,
+     "193.0": 8,
+     "199.0": 9,
+     "212.0": 10,
+     "222.0": 11,
+     "231.0": 12,
+     "232.0": 13,
+     "234.0": 14,
+     "234.1": 15,
+     "234.2": 16,
+     "234.3": 17,
+     "234.4": 18,
+     "235.0": 19,
+     "236.0": 20,
+     "239.0": 21,
+     "240.0": 22,
+     "251.0": 23,
+     "252.0": 24,
+     "262.0": 25,
+     "313.0": 26,
+     "314.0": 27,
+     "319.0": 28,
+     "321.0": 29,
+     "325.0": 30,
+     "330.0": 31,
+     "342.0": 32,
+     "350.0": 33,
+     "361.0": 34,
+     "362.0": 35,
+     "370.0": 36,
+     "380.0": 37,
+     "390.0": 38,
+     "410.0": 39,
+     "422.0": 40,
+     "423.0": 41,
+     "424.0": 42,
+     "429.0": 43,
+     "449.0": 44,
+     "490.0": 45,
+     "511.0": 46,
+     "512.0": 47,
+     "513.0": 48,
+     "519.0": 49,
+     "521.0": 50,
+     "523.0": 51,
+     "526.0": 52,
+     "529.0": 53,
+     "539.0": 54,
+     "611.0": 55,
+     "690.0": 56
+   },
+   "layer_norm_eps": 1e-12,
+   "max_length": 192,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "padding": "max_length",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 31002
+ }
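The labels here are numeric category codes from the training dataset; their meaning is not documented in this commit. Since `id2label` and `label2id` must remain mutual inverses for the model to decode predictions correctly, a small standalone consistency check (a sketch, assuming `config.json` has been downloaded locally) could read:

```
import json

# Assumes the config.json from this commit sits next to the script
with open("config.json") as f:
    cfg = json.load(f)

id2label, label2id = cfg["id2label"], cfg["label2id"]

assert len(id2label) == len(label2id) == cfg["_num_labels"] == 57
for idx, label in id2label.items():
    # JSON object keys are strings; label2id values are ints
    assert label2id[label] == int(idx)
print("label maps consistent for", len(id2label), "classes")
```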
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28faadaed8290303b673e670753af67840e9b399795fe4792d6de3288953630d
+ size 439662957
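What Git actually records for this file is not the ~440 MB of weights but the three-line Git LFS pointer shown above: the pointer spec version, the SHA-256 of the blob, and its size in bytes. A minimal sketch of reading such a pointer (hypothetical `parse_lfs_pointer` helper, standard library only):

```
def parse_lfs_pointer(text):
    # Each pointer line is "key value"; collect them into a dict
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:28faadaed8290303b673e670753af67840e9b399795fe4792d6de3288953630d
size 439662957"""

info = parse_lfs_pointer(pointer)
print(info["algo"], info["size"])  # sha256 439662957
```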
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:051a616b5679c0c719eafd5c53423db3c05ef23577496a172b37108eee34ee6b
+ size 5920
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": false, "do_basic_tokenize": true, "never_split": null, "model_max_length": 512, "special_tokens_map_file": "/root/.cache/huggingface/transformers/78141ed1e8dcc5ff370950397ca0d1c5c9da478f54ec14544187d8a93eff1a26.f982506b52498d4adb4bd491f593dc92b2ef6be61bfdbe9d30f53f963f9f5b66", "name_or_path": "AutoTrain", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff