abhishek committed

Commit 87c288d • 1 Parent(s): c111091

Commit From AutoNLP

.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
+---
+tags: autonlp
+language: ar
+widget:
+- text: "I love AutoNLP 🤗"
+datasets:
+- adelgasmi/autonlp-data-kpmg_nlp
+co2_eq_emissions: 64.58945483765274
+---
+
+# Model Trained Using AutoNLP
+
+- Problem type: Multi-class Classification
+- Model ID: 18833547
+- CO2 Emissions (in grams): 64.58945483765274
+
+## Validation Metrics
+
+- Loss: 0.14247722923755646
+- Accuracy: 0.9586074193404036
+- Macro F1: 0.9468339778730883
+- Micro F1: 0.9586074193404036
+- Weighted F1: 0.9585551117678807
+- Macro Precision: 0.9445436604001405
+- Micro Precision: 0.9586074193404036
+- Weighted Precision: 0.9591405429662925
+- Macro Recall: 0.9499427161888565
+- Micro Recall: 0.9586074193404036
+- Weighted Recall: 0.9586074193404036
+
+## Usage
+
+You can use cURL to access this model:
+
+```bash
+$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/adelgasmi/autonlp-kpmg_nlp-18833547
+```
+
+Or use the Python API:
+
+```python
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+model = AutoModelForSequenceClassification.from_pretrained("adelgasmi/autonlp-kpmg_nlp-18833547", use_auth_token=True)
+tokenizer = AutoTokenizer.from_pretrained("adelgasmi/autonlp-kpmg_nlp-18833547", use_auth_token=True)
+
+inputs = tokenizer("I love AutoNLP", return_tensors="pt")
+outputs = model(**inputs)
+```
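+
+The model returns raw logits; a minimal sketch of mapping them to a class label via the `id2label` entries in `config.json` (this assumes the snippet above has already run, so `model` and `outputs` are in scope):
+
+```python
+import torch
+
+# Convert the logits to probabilities and pick the highest-scoring class.
+probs = torch.softmax(outputs.logits, dim=-1)
+predicted_id = probs.argmax(dim=-1).item()
+
+# id2label comes from the model config (see config.json below).
+print(model.config.id2label[predicted_id], probs.max().item())
+```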
config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "_name_or_path": "AutoNLP",
+  "_num_labels": 5,
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "directionality": "bidi",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "0",
+    "1": "1",
+    "2": "2",
+    "3": "3",
+    "4": "4"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "0": 0,
+    "1": 1,
+    "2": 2,
+    "3": 3,
+    "4": 4
+  },
+  "layer_norm_eps": 1e-12,
+  "max_length": 128,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "padding": "max_length",
+  "pooler_fc_size": 768,
+  "pooler_num_attention_heads": 12,
+  "pooler_num_fc_layers": 3,
+  "pooler_size_per_head": 128,
+  "pooler_type": "first_token_transform",
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "transformers_version": "4.8.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 64000
+}
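The `max_length` and `padding` entries above record how inputs were shaped during AutoNLP training. A minimal sketch of mirroring those settings at inference time (only the parameter values come from the config; the call itself, including `truncation=True`, is an assumption):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("adelgasmi/autonlp-kpmg_nlp-18833547", use_auth_token=True)

# max_length=128 and padding="max_length" mirror the values in config.json;
# truncation=True is an assumed safeguard for inputs longer than 128 tokens.
inputs = tokenizer(
    "I love AutoNLP",
    return_tensors="pt",
    max_length=128,
    padding="max_length",
    truncation=True,
)
```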
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bb8f492dd992f6b6dd22868a55c9f40eecd3df40abdefeede4ee0483e057512
+size 540872877
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cc6dab1fbb08e8f2dfbed79a7b65bafa6e8c692a96e44441637f787978cce98
+size 4384
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "max_len": 512, "do_basic_tokenize": true, "never_split": ["+وا", "س+", "[مستخدم]", "+ك", "+هم", "+ة", "+ن", "لل+", "[بريد]", "[رابط]", "+ه", "+كن", "+ا", "+ات", "+ي", "ب+", "+نا", "+هن", "+كم", "ك+", "+ين", "+هما", "و+", "+كما", "+ان", "+ت", "+ون", "ل+", "+ها", "ال+", "ف+"], "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "AutoNLP", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render.