parvalijaved committed
Commit 824da13
1 Parent(s): eb2ed3e
Files changed (9)
  1. .gitattributes +6 -32
  2. README.md +119 -0
  3. config.json +33 -0
  4. flax_model.msgpack +3 -0
  5. merges.txt +0 -0
  6. pytorch_model.bin +3 -0
  7. special_tokens_map.json +1 -0
  8. tf_model.h5 +3 -0
  9. vocab.json +0 -0
.gitattributes CHANGED
@@ -1,35 +1,9 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
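These patterns decide which of the files added in this commit are stored as Git LFS pointers rather than regular git blobs. For illustration, a minimal sketch (assuming Python's fnmatch is an adequate stand-in for gitattributes matching, which holds for these simple `*.ext` rules) checks each committed file against the new patterns:

```python
# Sketch only: approximate the new .gitattributes rules with fnmatch to see
# which of the committed files end up as Git LFS pointers.
from fnmatch import fnmatch

lfs_patterns = [
    "*.bin.*", "*.lfs.*", "*.bin", "*.h5", "*.tflite",
    "*.tar.gz", "*.ot", "*.onnx", "*.msgpack",
]
commit_files = [
    "README.md", "config.json", "flax_model.msgpack", "merges.txt",
    "pytorch_model.bin", "special_tokens_map.json", "tf_model.h5", "vocab.json",
]

for name in commit_files:
    tracked = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(f"{name}: {'LFS pointer' if tracked else 'regular git blob'}")
```

This matches the diffs below: the three weight files (flax_model.msgpack, pytorch_model.bin, tf_model.h5) appear as LFS pointers, while the README, config, and tokenizer files are committed directly.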
 
README.md ADDED
@@ -0,0 +1,119 @@
+ ---
+ datasets:
+ - tweet_eval
+ language:
+ - en
+ ---
+ # Twitter-roBERTa-base for Sentiment Analysis
+
+ This is a roBERTa-base model trained on ~58M tweets and fine-tuned for sentiment analysis with the TweetEval benchmark. This model is suitable for English (for a similar multilingual model, see [XLM-T](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment)).
+
+ - Reference Paper: [_TweetEval_ (Findings of EMNLP 2020)](https://arxiv.org/pdf/2010.12421.pdf).
+ - Git Repo: [TweetEval official repository](https://github.com/cardiffnlp/tweeteval).
+
+ <b>Labels</b>:
+ 0 -> Negative;
+ 1 -> Neutral;
+ 2 -> Positive
+
+ <b>New!</b> We just released a new sentiment analysis model trained on a larger quantity of more recent tweets.
+ See [twitter-roberta-base-sentiment-latest](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment-latest) and [TweetNLP](https://tweetnlp.org) for more details.
+
+ ## Example of classification
+
+ ```python
+ from transformers import AutoModelForSequenceClassification
+ from transformers import TFAutoModelForSequenceClassification
+ from transformers import AutoTokenizer
+ import numpy as np
+ from scipy.special import softmax
+ import csv
+ import urllib.request
+
+ # Preprocess text (username and link placeholders)
+ def preprocess(text):
+     new_text = []
+
+
+     for t in text.split(" "):
+         t = '@user' if t.startswith('@') and len(t) > 1 else t
+         t = 'http' if t.startswith('http') else t
+         new_text.append(t)
+     return " ".join(new_text)
+
+ # Tasks:
+ # emoji, emotion, hate, irony, offensive, sentiment
+ # stance/abortion, stance/atheism, stance/climate, stance/feminist, stance/hillary
+
+ task='sentiment'
+ MODEL = f"cardiffnlp/twitter-roberta-base-{task}"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+
+ # download label mapping
+ labels=[]
+ mapping_link = f"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/{task}/mapping.txt"
+ with urllib.request.urlopen(mapping_link) as f:
+     html = f.read().decode('utf-8').split("\n")
+     csvreader = csv.reader(html, delimiter='\t')
+ labels = [row[1] for row in csvreader if len(row) > 1]
+
+ # PT
+ model = AutoModelForSequenceClassification.from_pretrained(MODEL)
+ model.save_pretrained(MODEL)
+
+ text = "Good night 😊"
+ text = preprocess(text)
+ encoded_input = tokenizer(text, return_tensors='pt')
+ output = model(**encoded_input)
+ scores = output[0][0].detach().numpy()
+ scores = softmax(scores)
+
+ # # TF
+ # model = TFAutoModelForSequenceClassification.from_pretrained(MODEL)
+ # model.save_pretrained(MODEL)
+
+ # text = "Good night 😊"
+ # encoded_input = tokenizer(text, return_tensors='tf')
+ # output = model(encoded_input)
+ # scores = output[0][0].numpy()
+ # scores = softmax(scores)
+
+ ranking = np.argsort(scores)
+ ranking = ranking[::-1]
+ for i in range(scores.shape[0]):
+     l = labels[ranking[i]]
+     s = scores[ranking[i]]
+     print(f"{i+1}) {l} {np.round(float(s), 4)}")
+
+ ```
+
+ Output:
+
+ ```
+ 1) positive 0.8466
+ 2) neutral 0.1458
+ 3) negative 0.0076
+ ```
+
+ ### BibTeX entry and citation info
+
+ Please cite the [reference paper](https://aclanthology.org/2020.findings-emnlp.148/) if you use this model.
+
+ ```bibtex
+ @inproceedings{barbieri-etal-2020-tweeteval,
+     title = "{T}weet{E}val: Unified Benchmark and Comparative Evaluation for Tweet Classification",
+     author = "Barbieri, Francesco and
+       Camacho-Collados, Jose and
+       Espinosa Anke, Luis and
+       Neves, Leonardo",
+     booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
+     month = nov,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2020.findings-emnlp.148",
+     doi = "10.18653/v1/2020.findings-emnlp.148",
+     pages = "1644--1650"
+ }
+ ```
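The card's example resolves label indices through the TweetEval mapping file. A shorter route is the transformers pipeline API; the sketch below assumes it and renames the checkpoint's generic LABEL_0/1/2 outputs to the negative/neutral/positive labels documented in the README:

```python
# Minimal sketch using the transformers pipeline API (not taken from the model
# card). The checkpoint's config emits generic LABEL_0/1/2 names, which the
# README documents as negative/neutral/positive.
from transformers import pipeline

classifier = pipeline(
    "sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment"
)
readable = {"LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive"}

# For real tweets, apply the card's preprocess() first to mask @mentions and links.
result = classifier("Good night 😊")[0]
print(readable[result["label"]], round(result["score"], 4))  # expected: positive ~0.85
```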
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "tweeteval_new/roberta-base-rt-sentiment/",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 50265
+ }
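Because id2label and label2id above hold placeholder names, downstream code sees LABEL_0/1/2 by default. A minimal sketch (a usage suggestion, not something shipped in this commit) overrides them at load time with the mapping from the README:

```python
# Sketch: override the placeholder id2label/label2id from config.json at load
# time so predictions carry the README's human-readable labels.
from transformers import AutoConfig, AutoModelForSequenceClassification

MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
id2label = {0: "negative", 1: "neutral", 2: "positive"}

config = AutoConfig.from_pretrained(
    MODEL,
    id2label=id2label,
    label2id={label: idx for idx, label in id2label.items()},
)
model = AutoModelForSequenceClassification.from_pretrained(MODEL, config=config)
print(model.config.id2label)  # {0: 'negative', 1: 'neutral', 2: 'positive'}
```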
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4d892ae550c152fd6175b7be9841cd9d3509ae80ea4b0aefe28ba41ea610d4d
+ size 498598977
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c37a3484c55954cd75b336a85f1e0c023ae874f3a73b05d2418dd04828e293b1
+ size 498679497
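The pointer records only the object id and byte size. As a rough sanity check (assuming float32 weights and negligible serialization overhead), that size is consistent with the ~125M parameters of a roberta-base classifier:

```python
# Rough sanity check, not an exact count: bytes / 4 (float32) ~ parameter count.
size_bytes = 498_679_497        # from the LFS pointer above
print(f"~{size_bytes / 4 / 1e6:.0f}M parameters")  # ~125M, consistent with roberta-base
```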
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60edb4641e2b28ee3ecfb272f96cb9a6b0a662f4072eaf294dd8a1fd8b8484f3
+ size 501229896
vocab.json ADDED
The diff for this file is too large to render. See raw diff