Commit fbbead0
1 Parent(s): 2c2c4fc
system (HF staff) committed

Commit From AutoTrain
.gitattributes CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - autotrain
+ - text-classification
+ language:
+ - en
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - reachosen/autotrain-data-sdohv7
+ co2_eq_emissions:
+   emissions: 0.01134763220649804
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Multi-class Classification
+ - Model ID: 3701198597
+ - CO2 Emissions (in grams): 0.0113
+
+ ## Validation Metrics
+
+ - Loss: 0.057
+ - Accuracy: 0.990
+ - Macro F1: 0.990
+ - Micro F1: 0.990
+ - Weighted F1: 0.990
+ - Macro Precision: 0.990
+ - Micro Precision: 0.990
+ - Weighted Precision: 0.991
+ - Macro Recall: 0.990
+ - Micro Recall: 0.990
+ - Weighted Recall: 0.990
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/reachosen/autotrain-sdohv7-3701198597
+ ```
+
+ Or you can use the Python API:
+
+ ```
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("reachosen/autotrain-sdohv7-3701198597", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("reachosen/autotrain-sdohv7-3701198597", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
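
The README snippet stops at the raw `outputs`. A minimal sketch of turning them into a predicted label, continuing from that snippet and assuming only standard `torch`/`transformers` behavior (`model.config.id2label` is populated from the config.json added below):

```
import torch

# Convert the raw logits into a probability distribution over the 23 classes.
probs = torch.softmax(outputs.logits, dim=-1)
pred_id = probs.argmax(dim=-1).item()

# id2label comes from this commit's config.json, e.g. 16 -> "Neutral".
print(model.config.id2label[pred_id], probs[0, pred_id].item())
```
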
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[MASK]": 128000
+ }
config.json ADDED
@@ -0,0 +1,88 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 23,
+   "architectures": [
+     "DebertaV2ForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Access to Care",
+     "1": "Access to Care Contradiction",
+     "2": "Depression",
+     "3": "Depression Contradiction",
+     "4": "Economic Instability",
+     "5": "Economic Instability Contradiction",
+     "6": "Employment Stress",
+     "7": "Employment Stress Contradiction",
+     "8": "Exposure to Violence",
+     "9": "Exposure to Violence Contradiction",
+     "10": "Food Insecurity ",
+     "11": "Food Insecurity Contradiction",
+     "12": "Housing Instability",
+     "13": "Housing Instability Contradiction",
+     "14": "Limited Language (English) Proficiency",
+     "15": "Limited Language Proficiency Contradiction",
+     "16": "Neutral",
+     "17": "Social Context",
+     "18": "Social Context Contradiction",
+     "19": "Substance Abuse",
+     "20": "Substance Abuse Contradiction",
+     "21": "Transportation",
+     "22": "Transportation Contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Access to Care": 0,
+     "Access to Care Contradiction": 1,
+     "Depression": 2,
+     "Depression Contradiction": 3,
+     "Economic Instability": 4,
+     "Economic Instability Contradiction": 5,
+     "Employment Stress": 6,
+     "Employment Stress Contradiction": 7,
+     "Exposure to Violence": 8,
+     "Exposure to Violence Contradiction": 9,
+     "Food Insecurity ": 10,
+     "Food Insecurity Contradiction": 11,
+     "Housing Instability": 12,
+     "Housing Instability Contradiction": 13,
+     "Limited Language (English) Proficiency": 14,
+     "Limited Language Proficiency Contradiction": 15,
+     "Neutral": 16,
+     "Social Context": 17,
+     "Social Context Contradiction": 18,
+     "Substance Abuse": 19,
+     "Substance Abuse Contradiction": 20,
+     "Transportation": 21,
+     "Transportation Contradiction": 22
+   },
+   "layer_norm_eps": 1e-07,
+   "max_length": 64,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta-v2",
+   "norm_rel_ebd": "layer_norm",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "padding": "max_length",
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "p2c",
+     "c2p"
+   ],
+   "position_biased_input": false,
+   "position_buckets": 256,
+   "relative_attention": true,
+   "share_att_key": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 0,
+   "vocab_size": 128100
+ }
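
The config pairs each SDOH class with an "X Contradiction" counterpart, and id 10 ("Food Insecurity ") is stored with a trailing space. A minimal sketch, assuming the standard `AutoConfig` API, that normalizes the label names before grouping them:

```
from transformers import AutoConfig

config = AutoConfig.from_pretrained("reachosen/autotrain-sdohv7-3701198597")

# "Food Insecurity " (id 10) carries a trailing space in this config,
# so strip label names before comparing or grouping them.
labels = {int(i): name.strip() for i, name in config.id2label.items()}
contradictions = sorted(i for i, name in labels.items() if name.endswith("Contradiction"))
print(len(labels), contradictions)
```
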
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:615732771a801e0b6172445e22bcdf81adbd36796c199e308d04eeedf9591c76
+ size 737833337
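
The weights themselves live in Git LFS; the three lines above are only the pointer file, recording the object's sha256 and byte size. A minimal sketch, using just the Python standard library, for checking a downloaded pytorch_model.bin against that pointer:

```
import hashlib
import os

def verify_lfs_object(path, expected_oid, expected_size):
    """Check a downloaded file against the sha256 oid and size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values taken from the pointer file above.
print(verify_lfs_object(
    "pytorch_model.bin",
    "615732771a801e0b6172445e22bcdf81adbd36796c199e308d04eeedf9591c76",
    737833337,
))
```
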
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
spm.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+ size 2464616
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cc5a4f5b0be46bb77c12eeae3959316af6f2f567f647f0ea3af9434a4adf372
+ size 8656814
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "name_or_path": "AutoTrain",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "split_by_punct": false,
+   "tokenizer_class": "DebertaV2Tokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "spm"
+ }
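
tokenizer_config.json leaves `model_max_length` effectively unbounded, while config.json above records the training-time settings `max_length: 64` and `padding: "max_length"`. A minimal sketch that applies those settings explicitly at inference (the input sentence is hypothetical):

```
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("reachosen/autotrain-sdohv7-3701198597")

# config.json records max_length=64 and padding="max_length" from training,
# so pass them explicitly rather than relying on the tokenizer's defaults.
inputs = tokenizer(
    "Patient reports difficulty affording groceries this month.",  # hypothetical example
    padding="max_length",
    truncation=True,
    max_length=64,
    return_tensors="pt",
)
print(inputs["input_ids"].shape)  # torch.Size([1, 64])
```
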