cardiffnlp committed on
Commit
eba980e
•
1 Parent(s): 128350c

Adding model files

Files changed (5)
  1. README.md +87 -0
  2. config.json +23 -0
  3. pytorch_model.bin +3 -0
  4. sentencepiece.bpe.model +3 -0
  5. tokenizer.json +0 -0
README.md ADDED
@@ -0,0 +1,87 @@

# Twitter-roBERTa-base

This is a roBERTa-base model trained on ~58M tweets and fine-tuned for the emoji prediction task of SemEval 2018.
For a full description, see the [_TweetEval_ benchmark paper (Findings of EMNLP 2020)](https://arxiv.org/pdf/2010.12421.pdf).
To evaluate this and other models on Twitter-specific data, please refer to the [TweetEval official repository](https://github.com/cardiffnlp/tweeteval).

## Example of classification

```python
from transformers import AutoModelForSequenceClassification
from transformers import TFAutoModelForSequenceClassification
from transformers import AutoTokenizer
import numpy as np
from scipy.special import softmax
import csv
import urllib.request

# Tasks:
# emoji, emotion, hate, irony, offensive, sentiment
# stance/abortion, stance/atheism, stance/climate, stance/feminist, stance/hillary

task = 'emoji'
MODEL = f"cardiffnlp/twitter-roberta-base-{task}"

tokenizer = AutoTokenizer.from_pretrained(MODEL)

# Download the label mapping for the chosen task
labels = []
mapping_link = f"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/{task}/mapping.txt"
with urllib.request.urlopen(mapping_link) as f:
    html = f.read().decode('utf-8').split("\n")
    csvreader = csv.reader(html[:-1], delimiter='\t')
    labels = [row[1] for row in csvreader]

# PT
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
model.save_pretrained(MODEL)

text = "Good night 😊"
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
scores = output[0][0].detach().numpy()
scores = softmax(scores)

# # TF
# model = TFAutoModelForSequenceClassification.from_pretrained(MODEL)
# model.save_pretrained(MODEL)

# text = "Good night 😊"
# encoded_input = tokenizer(text, return_tensors='tf')
# output = model(encoded_input)
# scores = output[0][0].numpy()
# scores = softmax(scores)

# Print the labels ranked by predicted probability
ranking = np.argsort(scores)
ranking = ranking[::-1]
for i in range(scores.shape[0]):
    l = labels[ranking[i]]
    s = scores[ranking[i]]
    print(f"{i+1}) {l} {np.round(float(s), 4)}")
```

Output:

```
1) 😘 0.2637
2) ❤️ 0.1952
3) 💕 0.1171
4) ✨ 0.0927
5) 😊 0.0756
6) 💜 0.046
7) 💙 0.0444
8) 😍 0.0272
9) 😉 0.0228
10) 😎 0.0198
11) 😜 0.0166
12) 😂 0.0132
13) 😁 0.0131
14) ☀ 0.0112
15) 🎄 0.009
16) 💯 0.009
17) 🔥 0.008
18) 📷 0.0057
19) 🇺🇸 0.005
20) 📸 0.0048
```
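As a quicker alternative, the same checkpoint can be queried through the high-level `pipeline` API. This is a minimal sketch, not part of the original example; it assumes a `transformers` version where `top_k=None` returns scores for every class, and the returned label names come from the model config (they may be generic `LABEL_0`, `LABEL_1`, ..., which is why the example above maps indices through `mapping.txt`).

```python
from transformers import pipeline

# Minimal sketch (assumed alternative, not from the original README).
# top_k=None asks the pipeline to return scores for all classes.
pipe = pipeline("text-classification",
                model="cardiffnlp/twitter-roberta-base-emoji",
                top_k=None)

# Label names come from the model config and may be generic (LABEL_0, ...);
# map them through the TweetEval mapping.txt as in the example above if needed.
print(pipe("Good night 😊"))
```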
config.json ADDED
@@ -0,0 +1,23 @@

{
  "architectures": [
    "XLMRobertaForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "type_vocab_size": 1,
  "vocab_size": 250002
}
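The config describes a 12-layer, 768-hidden XLM-RoBERTa encoder with a 250,002-token vocabulary. A minimal sketch for inspecting these fields with `AutoConfig`, assuming a local checkout where `config.json` sits in the working directory:

```python
from transformers import AutoConfig

# Load the config shipped in this commit; the local path is an assumption.
config = AutoConfig.from_pretrained("config.json")

print(config.model_type)         # xlm-roberta
print(config.num_hidden_layers)  # 12
print(config.hidden_size)        # 768
print(config.vocab_size)         # 250002
```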
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:0a549331ad8806684e39e05fb47ec9cfb84fdab5ba203d838ca34b0b914c5bfa
size 1113236958
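The three lines above are a Git LFS pointer, not the weights themselves: they record the LFS spec version, the sha256 object id, and the blob size (~1.1 GB). A hedged sketch of fetching the blob with `huggingface_hub` and verifying it against the pointer's oid; the repo id is a hypothetical placeholder, since the commit view does not name the repository:

```python
import hashlib
from huggingface_hub import hf_hub_download

# REPO_ID is a hypothetical placeholder; the commit page does not name the repo.
REPO_ID = "cardiffnlp/<model-name>"
path = hf_hub_download(repo_id=REPO_ID, filename="pytorch_model.bin")

# Hash in chunks (the blob is ~1.1 GB) and compare with the LFS pointer's oid.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
print(sha.hexdigest() == "0a549331ad8806684e39e05fb47ec9cfb84fdab5ba203d838ca34b0b914c5bfa")
```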
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
size 5069051
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff