finiteautomata committed on
Commit
b76832d
1 Parent(s): 3677e60

First version

README.md ADDED
# Hate Speech Detection in Spanish
## robertuito-hate-speech

Repository: [https://github.com/pysentimiento/pysentimiento/](https://github.com/pysentimiento/pysentimiento/)

Model trained on the SemEval 2019 Task 5: HatEval (Subtask B) corpus for hate speech detection in Spanish. The base model is [RoBERTuito](https://github.com/pysentimiento/robertuito), a RoBERTa model pre-trained on Spanish tweets.

It is a multi-label classifier with the following classes:

- **HS**: is it hate speech?
- **TR**: is it targeted at a specific individual?
- **AG**: is it aggressive?

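As a quick illustration, here is a minimal usage sketch with the Hugging Face `transformers` library. It is not an official snippet: the model identifier and the 0.5 decision threshold are assumptions, and the tweet preprocessing that `pysentimiento` normally applies is omitted for brevity.

```python
# Minimal sketch (assumptions: the checkpoint is published on the Hub under the
# identifier below, and each label is decided independently with a 0.5 threshold).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "pysentimiento/robertuito-hate-speech"  # hypothetical identifier

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

text = "some tweet text here"
inputs = tokenizer(text, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): [hateful, targeted, aggressive]

probs = torch.sigmoid(logits)[0]  # independent per-class probabilities
predicted = [model.config.id2label[i] for i, p in enumerate(probs) if p > 0.5]
print(predicted)
```
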
## Results

Results for the four tasks evaluated in `pysentimiento`, expressed as macro F1 scores:

| model         | emotion       | hate_speech   | irony         | sentiment     |
|:--------------|:--------------|:--------------|:--------------|:--------------|
| robertuito    | 0.560 ± 0.010 | 0.759 ± 0.007 | 0.739 ± 0.005 | 0.705 ± 0.003 |
| roberta       | 0.527 ± 0.015 | 0.741 ± 0.012 | 0.721 ± 0.008 | 0.670 ± 0.006 |
| bertin        | 0.524 ± 0.007 | 0.738 ± 0.007 | 0.713 ± 0.012 | 0.666 ± 0.005 |
| beto_uncased  | 0.532 ± 0.012 | 0.727 ± 0.016 | 0.701 ± 0.007 | 0.651 ± 0.006 |
| beto_cased    | 0.516 ± 0.012 | 0.724 ± 0.012 | 0.705 ± 0.009 | 0.662 ± 0.005 |
| mbert_uncased | 0.493 ± 0.010 | 0.718 ± 0.011 | 0.681 ± 0.010 | 0.617 ± 0.003 |
| biGRU         | 0.264 ± 0.007 | 0.592 ± 0.018 | 0.631 ± 0.011 | 0.585 ± 0.011 |

Note that for hate speech, these are the results for SemEval 2019 Task 5, Subtask B (HS+TR+AG detection).

## Citation

If you use this model in your research, please cite the pysentimiento and RoBERTuito papers:

```
@misc{perez2021pysentimiento,
      title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks},
      author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque},
      year={2021},
      eprint={2106.09462},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
@misc{perez2021robertuito,
      title={RoBERTuito: a pre-trained language model for social media text in Spanish},
      author={Juan Manuel Pérez and Damián A. Furman and Laura Alonso Alemany and Franco Luque},
      year={2021},
      eprint={2111.09453},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
config.json ADDED
{
  "_name_or_path": "pysentimiento/robertuito-base-uncased",
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "hateful",
    "1": "targeted",
    "2": "aggressive"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "aggressive": 2,
    "hateful": 0,
    "targeted": 1
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 130,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "multi_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.11.3",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 30002
}
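For context, `"problem_type": "multi_label_classification"` tells `transformers` to treat the three labels as independent binary decisions during fine-tuning: a sigmoid per class with a binary cross-entropy loss, rather than a softmax over classes. A minimal sketch of that computation, with made-up numbers:

```python
# Sketch of the multi-label objective implied by problem_type above.
# The logits/targets here are invented for illustration only.
import torch
import torch.nn as nn

logits = torch.tensor([[2.1, -0.3, 0.8]])   # one example, classes [hateful, targeted, aggressive]
targets = torch.tensor([[1.0, 0.0, 1.0]])   # float targets, one per class

loss = nn.BCEWithLogitsLoss()(logits, targets)  # per-class binary cross-entropy
probs = torch.sigmoid(logits)                   # probabilities need not sum to 1
print(loss.item(), probs)
```
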
pytorch_model.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:169eabf45b7e489e38711439c9c217e2ad1fcf955fb14cad32816e835ae4a688
size 435246509
special_tokens_map.json ADDED
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
test_results.json ADDED
{
  "test_loss": 0.4132111370563507,
  "test_hs_f1": 0.7729658792650919,
  "test_hs_precision": 0.6817129629629629,
  "test_hs_recall": 0.8924242424242425,
  "test_tr_f1": 0.8071428571428573,
  "test_tr_precision": 0.8129496402877698,
  "test_tr_recall": 0.8014184397163121,
  "test_ag_f1": 0.7058823529411765,
  "test_ag_precision": 0.6111111111111112,
  "test_ag_recall": 0.8354430379746836,
  "test_macro_hs_f1_score": 0.7832609825919732,
  "test_emr_no_gating": 0.67,
  "test_emr": 0.67125,
  "test_macro_f1": 0.7619970440864563,
  "test_macro_precision": 0.7019245624542236,
  "test_macro_recall": 0.8430952429771423,
  "test_runtime": 3.1919,
  "test_samples_per_second": 501.263,
  "test_steps_per_second": 31.329
}
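As a reference for how figures like these can be computed, here is a hedged sketch using scikit-learn. It assumes gold labels and predictions are 0/1 matrices with columns ordered as [hateful, targeted, aggressive], and reads `test_emr` as the exact-match ratio (all three labels correct at once); the arrays below are made up, not the actual evaluation data.

```python
# Sketch only: toy arrays, not the evaluation code used for this model.
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

# shape (n_samples, 3); columns assumed to be [hateful, targeted, aggressive]
y_true = np.array([[1, 0, 1], [0, 0, 0], [1, 1, 1], [1, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 0, 0], [1, 1, 1], [1, 0, 0]])

hs_f1, tr_f1, ag_f1 = f1_score(y_true, y_pred, average=None)   # per-class F1
macro_f1 = f1_score(y_true, y_pred, average="macro")           # mean of the three
macro_precision = precision_score(y_true, y_pred, average="macro")
macro_recall = recall_score(y_true, y_pred, average="macro")
emr = accuracy_score(y_true, y_pred)  # exact-match ratio: all labels must agree
```
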
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": "models/twerto-base-uncased/special_tokens_map.json", "name_or_path": "pysentimiento/robertuito-base-uncased", "tokenizer_class": "PreTrainedTokenizerFast"}
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:2d7c98dc95c82c2b1466b8a6920bab4012a36d689e7a2c2b18fd1bb425546b5a
size 2799