abhishek (HF staff) committed

Commit 6b6859a
1 Parent(s): 1435380

Commit From AutoNLP
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,52 @@
+ ---
+ tags: autonlp
+ language: unk
+ widget:
+ - text: "I love AutoNLP 🤗"
+ datasets:
+ - test1345/autonlp-data-savesome
+ co2_eq_emissions: 5.714250590300453
+ ---
+
+ # Model Trained Using AutoNLP
+
+ - Problem type: Multi-class Classification
+ - Model ID: 631818261
+ - CO2 Emissions (in grams): 5.714250590300453
+
+ ## Validation Metrics
+
+ - Loss: 0.44651690125465393
+ - Accuracy: 0.8792873051224944
+ - Macro F1: 0.839261602941426
+ - Micro F1: 0.8792873051224943
+ - Weighted F1: 0.8790427387522044
+ - Macro Precision: 0.8407634723656228
+ - Micro Precision: 0.8792873051224944
+ - Weighted Precision: 0.8801219917819031
+ - Macro Recall: 0.8400328140795883
+ - Micro Recall: 0.8792873051224944
+ - Weighted Recall: 0.8792873051224944
+
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/test1345/autonlp-savesome-631818261
+ ```
+
+ Or you can use the Python API:
+
+ ```
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("test1345/autonlp-savesome-631818261", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("test1345/autonlp-savesome-631818261", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoNLP", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
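The committed snippet stops at the raw model outputs. As a minimal sketch (not part of the committed README, and assuming `torch` and `transformers` are installed), the logits can be turned into one of the 18 category names via the `id2label` mapping in this commit's config.json:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "test1345/autonlp-savesome-631818261"
model = AutoModelForSequenceClassification.from_pretrained(repo_id, use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained(repo_id, use_auth_token=True)

inputs = tokenizer("I love AutoNLP", return_tensors="pt")
with torch.no_grad():  # inference only, no gradients needed
    outputs = model(**inputs)

# Softmax over the 18 class logits, then map the winning index to its
# label name via the id2label table shipped in config.json.
probs = outputs.logits.softmax(dim=-1)
predicted_id = probs.argmax(dim=-1).item()
print(model.config.id2label[predicted_id], probs[0, predicted_id].item())
```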
config.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "_name_or_path": "AutoNLP",
+   "_num_labels": 18,
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Baby",
+     "1": "Bereidingen of Charcuterie of Vis of Veggie",
+     "2": "Brood of Ontbijt",
+     "3": "Chips of Borrelhapjes",
+     "4": "Colruyt-beenhouwerij",
+     "5": "Conserven",
+     "6": "Dieetvoeding of Voedingssupplementen",
+     "7": "Diepvries",
+     "8": "Dranken",
+     "9": "Groenten en fruit",
+     "10": "Huisdieren",
+     "11": "Koeken of Chocolade of Snoep",
+     "12": "Kruidenierswaren of Droge voeding",
+     "13": "Lichaamsverzorging of Parfumerie",
+     "14": "Niet-voeding",
+     "15": "Onderhoud of Huishouden",
+     "16": "Wijn",
+     "17": "Zuivel"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "Baby": 0,
+     "Bereidingen of Charcuterie of Vis of Veggie": 1,
+     "Brood of Ontbijt": 2,
+     "Chips of Borrelhapjes": 3,
+     "Colruyt-beenhouwerij": 4,
+     "Conserven": 5,
+     "Dieetvoeding of Voedingssupplementen": 6,
+     "Diepvries": 7,
+     "Dranken": 8,
+     "Groenten en fruit": 9,
+     "Huisdieren": 10,
+     "Koeken of Chocolade of Snoep": 11,
+     "Kruidenierswaren of Droge voeding": 12,
+     "Lichaamsverzorging of Parfumerie": 13,
+     "Niet-voeding": 14,
+     "Onderhoud of Huishouden": 15,
+     "Wijn": 16,
+     "Zuivel": 17
+   },
+   "layer_norm_eps": 1e-05,
+   "max_length": 64,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "padding": "max_length",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 40000
+ }
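As a side note (not part of the commit), the label tables above are what `transformers` consults at inference time. A minimal sketch for inspecting them, assuming only the `transformers` library and read access to this repository:

```python
from transformers import AutoConfig

# Load only the config.json committed above; no model weights are downloaded.
config = AutoConfig.from_pretrained("test1345/autonlp-savesome-631818261", use_auth_token=True)

print(config.num_labels)  # 18 product categories
for idx, label in sorted(config.id2label.items()):
    print(idx, label)     # e.g. 0 -> "Baby", 16 -> "Wijn"
```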
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad6a242be0cd497ffbd175d495ce3ae6f1c58491eafe5ae49bb1a9a57274e0f6
+ size 467189229
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bfd2c345c6bb7096ce0144bef408854688e5b138ada1ae0eeaefe2732fb1a6a
+ size 2034
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "trim_offsets": true, "special_tokens_map_file": "./robbert-v2-dutch-base/special_tokens_map.json", "name_or_path": "AutoNLP", "tokenizer_class": "RobertaTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff