d.tsimerman committed
Commit 2489de7
1 Parent(s): e5bbd67

initial commit
README.md CHANGED
@@ -1,3 +1,33 @@
  ---
+ tags:
+ - bert
  license: mit
+ language:
+ - ru
  ---
+
+ # multilabel-context-russian-inapropriate-messages
+
+ [BERT classifier from Skoltech](https://huggingface.co/Skoltech/russian-inappropriate-messages), finetuned on contextual data with 4 labels.
+
+ # Training
+
+ *Skoltech/russian-inappropriate-messages* was finetuned on multiclass data with four classes:
+
+ 1) OK label -- the message is OK in its context and is not intended to offend or otherwise harm the speaker's reputation.
+ 2) Toxic label -- the message might be seen as offensive in the given context.
+ 3) Severe toxic label -- the message is offensive, full of anger, and was written to provoke a fight or other discomfort.
+ 4) Risks label -- the message touches on sensitive topics (e.g. religion, politics) and can harm the reputation of the speaker.
+
+ The model was finetuned on DATASET_LINK.
+
+ # Evaluation results
+
+ The model achieves the following results:
+
+ | | OK - Precision | OK - Recall | OK - F1-score | TOXIC - Precision | TOXIC - Recall | TOXIC - F1-score | SEVERE TOXIC - Precision | SEVERE TOXIC - Recall | SEVERE TOXIC - F1-score | RISKS - Precision | RISKS - Recall | RISKS - F1-score |
+ |-------------------------|----------------|-------------|---------------|-------------------|----------------|------------------|--------------------------|-----------------------|-------------------------|-------------------|----------------|------------------|
+ | DATASET_TWITTER val.csv | 0.883 | 0.913 | 0.896 | 0.368 | 0.330 | 0.348 | 0.515 | 0.468 | 0.490 | 0.659 | 0.535 | 0.591 |
+ | DATASET_GENA val.csv | 0.953 | 0.927 | 0.940 | 0.260 | 0.343 | 0.295 | 0.666 | 0.806 | 0.729 | 0.523 | 0.423 | 0.460 |
+
+ This work was done during an internship at Tinkoff.
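For reference, a minimal inference sketch against this checkpoint. The local path comes from `_name_or_path` in the committed config.json (swap in the Hub repo id once published), and the input layout (dialogue context, then `[RESPONSE_TOKEN]`, then the reply being classified) is an assumption inferred from the added separator token, not something the commit confirms:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Checkpoint path taken from config.json; replace with the Hub repo id if published.
MODEL_DIR = "./context-russian-inappropriate-messages/checkpoint-584/"

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR)
model.eval()

# Assumed format: dialogue context, [RESPONSE_TOKEN], then the reply to classify.
text = "как дела? [RESPONSE_TOKEN] отвратительно, отстань от меня"
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 4): ok / risks / severe_toxic / toxic

probs = torch.softmax(logits, dim=-1)[0]
label_id = int(probs.argmax())
print(model.config.id2label[label_id], round(float(probs[label_id]), 3))
```

Because config.json sets `problem_type` to `single_label_classification`, a softmax over the four logits yields the class distribution, and the argmax is the predicted label.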
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"[RESPONSE_TOKEN]": 100792}
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "./context-russian-inappropriate-messages/checkpoint-584/",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "ok",
+     "1": "risks",
+     "2": "severe_toxic",
+     "3": "toxic"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "ok": 0,
+     "risks": 1,
+     "severe_toxic": 2,
+     "toxic": 3
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 100793
+ }
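A few of these fields drive inference behavior: `id2label`/`label2id` fix the four-class mapping, `problem_type` marks the task as single-label classification, and `vocab_size` reflects the added separator token. A quick sanity check, assuming the checkpoint directory from `_name_or_path`:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./context-russian-inappropriate-messages/checkpoint-584/")
print(cfg.id2label)      # {0: 'ok', 1: 'risks', 2: 'severe_toxic', 3: 'toxic'}
print(cfg.problem_type)  # 'single_label_classification': one softmax over four classes
print(cfg.vocab_size)    # 100793 = base vocabulary + [RESPONSE_TOKEN]
```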
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f83b623726d48538a077857238102133c33838562cb1b7706c07551ab291392
+ size 653868081
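The entry above is a Git LFS pointer, not the weights themselves; after `git lfs pull`, the downloaded file should match the recorded digest and size. A small verification sketch:

```python
import hashlib
import os

PATH = "pytorch_model.bin"
EXPECTED_OID = "3f83b623726d48538a077857238102133c33838562cb1b7706c07551ab291392"
EXPECTED_SIZE = 653868081

h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```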
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "additional_special_tokens": ["[RESPONSE_TOKEN]"]}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "use_fast": true, "special_tokens_map_file": "/root/.cache/huggingface/transformers/1f428acdde727eed5de979d6856ce350a470be2a64e134a1fdae04af78a27301.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "./context-russian-inappropriate-messages/checkpoint-584/", "tokenizer_class": "BertTokenizer"}
vocab.txt ADDED
The diff for this file is too large to render. See raw diff