ophelielacroix committed
Commit 4b6008f
Parent: 459bfbc

initial commit
README.md CHANGED
@@ -1,3 +1,37 @@
- ---
- license: cc-by-4.0
- ---
+ ---
+ language:
+ - da
+ tags:
+ - electra
+ - pytorch
+ - hatespeech
+ license: cc-by-4.0
+ datasets:
+ - social media
+ metrics:
+ - f1
+ widget:
+ - text: "Senile gamle idiot"
+ ---
+
+ # Danish ELECTRA for hate speech (offensive language) detection
+
+ The ELECTRA Offensive model detects whether a Danish text is offensive or not.
+ It is based on the pretrained [Danish Ælæctra](Maltehb/aelaectra-danish-electra-small-cased) model.
+
+ See the [DaNLP documentation](https://danlp-alexandra.readthedocs.io/en/latest/docs/tasks/hatespeech.html#electra) for more details.
+
+
+ Here is how to use the model:
+
+ ```python
+ from transformers import ElectraTokenizer, ElectraForSequenceClassification
+
+ model = ElectraForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection")
+ tokenizer = ElectraTokenizer.from_pretrained("DaNLP/da-electra-hatespeech-detection")
+ ```
+
+ ## Training data
+
+ The data used for training has not been made publicly available. It consists of social media data manually annotated in collaboration with Danmarks Radio.
+
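The committed README stops at loading the model and tokenizer. As a complement (not part of the commit), here is a minimal inference sketch; it assumes the `id2label` mapping from the `config.json` added below, and reuses the widget's example sentence:

```python
import torch
from transformers import ElectraTokenizer, ElectraForSequenceClassification

model = ElectraForSequenceClassification.from_pretrained("DaNLP/da-electra-hatespeech-detection")
tokenizer = ElectraTokenizer.from_pretrained("DaNLP/da-electra-hatespeech-detection")

# Tokenize the widget example and run it through the classification head.
inputs = tokenizer("Senile gamle idiot", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

# id2label (from config.json) maps the argmax index to a readable label.
predicted = model.config.id2label[logits.argmax(dim=-1).item()]
print(predicted)  # "offensive" or "not offensive"
```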
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": ".",
+   "architectures": [
+     "ElectraForSequenceClassification"
+   ],
+   "id2label": {
+     "0": "not offensive",
+     "1": "offensive"
+   },
+   "label2id": {
+     "not offensive": 0,
+     "offensive": 1
+   },
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "embedding_size": 128,
+   "generator_size": "0.25",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 256,
+   "initializer_range": 0.02,
+   "intermediate_size": 1024,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "summary_activation": "gelu",
+   "summary_last_dropout": 0.1,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "transformers_version": "4.5.0",
+   "type_vocab_size": 2,
+   "vocab_size": 32000
+ }
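These fields describe a small ELECTRA encoder (12 layers, hidden size 256, 4 attention heads) with a binary classification head. A short sketch for inspecting them through the standard `transformers` config API (illustrative, not part of the commit):

```python
from transformers import ElectraConfig

# Load the configuration committed above directly from the Hub.
config = ElectraConfig.from_pretrained("DaNLP/da-electra-hatespeech-detection")

# The label mappings define the binary offensive / not offensive head.
print(config.id2label)              # {0: 'not offensive', 1: 'offensive'}
print(config.num_hidden_layers,     # 12
      config.hidden_size,           # 256
      config.num_attention_heads)   # 4
```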
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62e42851587ac602a957534251bae9f3855468105a48c9f87218062286e9fdf2
+ size 55043297
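This is a Git LFS pointer file: the repository tracks only the object's SHA-256 digest and byte size, while the ~55 MB checkpoint itself is fetched from the LFS server. A minimal sketch for checking a downloaded copy against the pointer (assumes `pytorch_model.bin` is already on disk):

```python
import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_OID = "62e42851587ac602a957534251bae9f3855468105a48c9f87218062286e9fdf2"
EXPECTED_SIZE = 55043297

# Hash in 1 MiB chunks so the checkpoint never has to fit in memory at once.
digest = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```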
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "full_tokenizer_file": null, "model_max_length": 128, "name_or_path": "Maltehb/-l-ctra-danish-electra-small-cased", "do_basic_tokenize": true, "never_split": null}
vocab.txt ADDED
The diff for this file is too large to render.