abhishek (HF staff) committed on
Commit
73d52ac
1 Parent(s): 583726c

Commit From AutoNLP

README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags: autonlp
+ language: fr
+ widget:
+ - text: "I love AutoNLP 🤗"
+ datasets:
+ - pierreant-p/autonlp-data-jcvd-or-linkedin
+ ---
+
+ # Model Trained Using AutoNLP
+
+ - Problem type: Multi-class Classification
+ - Model ID: 3471039
+
+ ## Validation Metrics
+
+ - Loss: 0.6704344749450684
+ - Accuracy: 0.59375
+ - Macro F1: 0.37254901960784315
+ - Micro F1: 0.59375
+ - Weighted F1: 0.4424019607843137
+ - Macro Precision: 0.296875
+ - Micro Precision: 0.59375
+ - Weighted Precision: 0.3525390625
+ - Macro Recall: 0.5
+ - Micro Recall: 0.59375
+ - Weighted Recall: 0.59375
+
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```bash
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/pierreant-p/autonlp-jcvd-or-linkedin-3471039
+ ```
+
+ Or use the Python API:
+
+ ```python
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("pierreant-p/autonlp-jcvd-or-linkedin-3471039", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("pierreant-p/autonlp-jcvd-or-linkedin-3471039", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoNLP", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
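The snippet in the README stops at the raw `outputs`. As a minimal sketch of how those logits might be turned into a predicted label (assuming the `JCVD`/`LinkedIn` mapping from the config.json added below; the `predict_label` helper is illustrative, not part of the model card):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_ID = "pierreant-p/autonlp-jcvd-or-linkedin-3471039"

model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_auth_token=True)

def predict_label(text):
    """Illustrative helper (not from the model card): returns (label, probability)."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=64)
    with torch.no_grad():
        outputs = model(**inputs)
    # Softmax over the logits yields per-class probabilities.
    probs = outputs.logits.softmax(dim=-1)[0]
    idx = int(probs.argmax())
    # id2label ({0: "JCVD", 1: "LinkedIn"}) comes from the model config below.
    return model.config.id2label[idx], float(probs[idx])

print(predict_label("I love AutoNLP"))
```

The `max_length=64` mirrors the `max_length` that the config below reports AutoNLP trained with.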
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_name_or_path": "AutoNLP",
+   "_num_labels": 2,
+   "architectures": [
+     "CamembertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "eos_token_ids": 0,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "JCVD",
+     "1": "LinkedIn"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "JCVD": 0,
+     "LinkedIn": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_length": 64,
+   "max_position_embeddings": 514,
+   "model_type": "camembert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "padding": "max_length",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "transformers_version": "4.8.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 32005
+ }
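Most of these fields are standard CamemBERT hyperparameters; the parts specific to this model are the `JCVD`/`LinkedIn` label maps and the `max_length` of 64. A minimal sketch for inspecting them without pulling the full weights, using the standard `AutoConfig` API (the token requirement is an assumption carried over from the README's `use_auth_token=True`):

```python
from transformers import AutoConfig

# Loads only config.json, not pytorch_model.bin.
config = AutoConfig.from_pretrained(
    "pierreant-p/autonlp-jcvd-or-linkedin-3471039", use_auth_token=True
)

print(config.model_type)  # camembert
print(config.id2label)    # {0: 'JCVD', 1: 'LinkedIn'} (keys parsed to ints)
print(config.max_length)  # 64
```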
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:743eba6c234cb05f102339e0ab9b8c0678af6a0d3adee63fb7c0aaa6da5e9f68
+ size 442584521
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccb3c6ac5e87e972659c3e469f3cc8f652d3e46a58a42ffdf80a5e22b924bce0
+ size 2034
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:988bc5a00281c6d210a5d34bd143d0363741a432fefe741bf71e61b1869d4314
+ size 810912
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"]}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"], "special_tokens_map_file": null, "name_or_path": "AutoNLP", "sp_model_kwargs": {}, "tokenizer_class": "CamembertTokenizer"}