lrei committed on
Commit d7ea396
Parent: d934782

Upload 8 files

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ config.json filter=lfs diff=lfs merge=lfs -text
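The .gitattributes change routes config.json through the Git LFS filter, alongside the existing wildcard patterns. A small sketch of how such patterns map file names to LFS tracking (fnmatch-style matching is only an approximation of gitattributes semantics, used here purely for illustration):

```python
from fnmatch import fnmatch

# Patterns taken from the .gitattributes hunk above.
lfs_patterns = ["*.zip", "*.zst", "*tfevents*", "config.json"]

def tracked_by_lfs(path: str) -> bool:
    """Rough check: does any LFS pattern match this file name?"""
    return any(fnmatch(path, pattern) for pattern in lfs_patterns)

print(tracked_by_lfs("config.json"))        # True (the newly added pattern)
print(tracked_by_lfs("events.tfevents.1"))  # True (*tfevents*)
print(tracked_by_lfs("vocab.json"))         # False
```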
config.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10823eebda46de0691dd95708759a6491756dfb5ff4a88a395310b1fc90de43e
+ size 502149120
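What is committed for config.json (and for pytorch_model.bin below) is a Git LFS pointer file rather than the payload itself: each pointer records the spec version, the SHA-256 object id of the real blob, and its size in bytes. A minimal sketch of reading such a pointer, assuming a local checkout of this repository (the helper name is illustrative, not part of the commit):

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Hypothetical usage on this commit's pointer file:
# parse_lfs_pointer("config.json")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:10823eeb...", "size": "502149120"}
```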
merges.txt ADDED
The diff for this file is too large to render.
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:236366f2eb83f61f8b4cf71f5f66e0f787a9779a13c4b6f91d94e3976b949572
+ size 498772789
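Once the actual weights blob has been fetched, the pointer's oid and size can be checked against it. A small sketch under the assumption that the file has been downloaded next to the pointer (the local path is a placeholder):

```python
import hashlib

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the sha256/size recorded in its LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # stream in 1 MiB chunks
            sha.update(chunk)
            size += len(chunk)
    return f"sha256:{sha.hexdigest()}" == expected_oid and size == expected_size

# Hypothetical usage for the uploaded weights:
# verify_lfs_blob(
#     "pytorch_model.bin",
#     "sha256:236366f2eb83f61f8b4cf71f5f66e0f787a9779a13c4b6f91d94e3976b949572",
#     498772789,
# )
```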
results.txt ADDED
@@ -0,0 +1,47 @@
+ {"f1_micro": 0.6042841037204059, "f1_macro": 0.58999731368133, "p_macro": 0.5824760127594218, "p_micro": 0.5925925925925926, "r_macro": 0.6598429973636707, "r_micro": 0.61644623346751}
+                  precision    recall  f1-score   support
+
+       admiration    0.7660    0.3273    0.4586       110
+        amusement    0.7188    0.8846    0.7931        52
+            anger    0.7231    0.6528    0.6861        72
+        annoyance    0.5250    0.7368    0.6131        57
+         approval    0.8276    0.4898    0.6154        98
+          boredom    0.6486    0.9057    0.7559        53
+         calmness    0.6981    0.8222    0.7551        45
+           caring    0.7222    0.8125    0.7647        64
+          courage    0.5254    0.7381    0.6139        42
+        curiosity    0.7778    0.8358    0.8058        67
+           desire    0.7922    0.7531    0.7722        81
+          despair    0.7619    0.7273    0.7442        44
+   disappointment    0.4359    0.4359    0.4359        39
+      disapproval    0.5273    0.2613    0.3494       111
+          disgust    0.7714    0.3750    0.5047        72
+            doubt    0.6000    0.4186    0.4932        43
+    embarrassment    0.4828    0.6364    0.5490        22
+             envy    0.3171    0.9286    0.4727        14
+       excitement    0.5918    0.7632    0.6667        38
+            faith    0.3846    0.7692    0.5128        13
+             fear    0.4167    0.3846    0.4000        39
+      frustration    0.5833    0.6481    0.6140        54
+        gratitude    0.2333    0.5000    0.3182        14
+            greed    0.6296    0.6800    0.6538        25
+            grief    0.2727    0.8571    0.4138        14
+            guilt    0.4783    0.8462    0.6111        13
+     indifference    0.6200    0.8378    0.7126        37
+              joy    0.7576    0.4098    0.5319        61
+             love    0.6415    0.6800    0.6602        50
+      nervousness    0.5000    0.5000    0.5000        24
+        nostalgia    0.2547    0.9310    0.4000        29
+         optimism    0.5000    0.3784    0.4308        37
+             pain    0.3784    0.6364    0.4746        22
+            pride    0.5152    0.6296    0.5667        27
+           relief    0.5366    0.8800    0.6667        25
+          sadness    0.6591    0.5577    0.6042        52
+         surprise    0.7647    0.7222    0.7429        36
+            trust    0.7949    0.7209    0.7561        43
+
+        micro avg    0.5926    0.6164    0.6043      1739
+        macro avg    0.5825    0.6598    0.5900      1739
+     weighted avg    0.6415    0.6164    0.6016      1739
+      samples avg    0.6462    0.6390    0.5937      1739
+
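results.txt reads like a standard multi-label evaluation dump: a JSON line with micro/macro precision, recall and F1, followed by a per-class, sklearn-style classification report over 38 emotion labels. A minimal sketch of how such output is typically produced (the y_true/y_pred arrays and the three-label list are placeholders, not data from this commit):

```python
import json

import numpy as np
from sklearn.metrics import classification_report, precision_recall_fscore_support

labels = ["admiration", "amusement", "anger"]  # the real run uses 38 emotion labels

# Placeholder multi-label indicator matrices (n_samples x n_labels of 0/1).
y_true = np.array([[1, 0, 0], [0, 1, 1], [1, 0, 1]])
y_pred = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])

p_macro, r_macro, f1_macro, _ = precision_recall_fscore_support(
    y_true, y_pred, average="macro", zero_division=0)
p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(
    y_true, y_pred, average="micro", zero_division=0)

# JSON summary line in the same key order as results.txt.
print(json.dumps({
    "f1_micro": float(f1_micro), "f1_macro": float(f1_macro),
    "p_macro": float(p_macro), "p_micro": float(p_micro),
    "r_macro": float(r_macro), "r_micro": float(r_micro),
}))

# Per-class table with micro/macro/weighted/samples averages, 4-digit precision.
print(classification_report(y_true, y_pred, target_names=labels,
                            digits=4, zero_division=0))
```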
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "name_or_path": "roberta-base",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
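The tokenizer files identify this as a RobertaTokenizer derived from roberta-base with a 512-token limit, and the commit also ships config.json and pytorch_model.bin, so the upload should be loadable with transformers. A sketch under two assumptions that the diff itself does not confirm: the repo id below is a placeholder, and the sequence-classification head is inferred from results.txt (config.json is an LFS pointer here, so its contents are not visible):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "path/to/this-repo"  # placeholder: the actual repo id is not shown in this commit

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)  # assumed head

inputs = tokenizer("I can't believe this actually worked!",
                   truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# results.txt suggests a multi-label emotion classifier, so independent sigmoid
# scores per label (rather than a softmax over classes) would be the natural readout.
probs = torch.sigmoid(logits)
```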
vocab.json ADDED
The diff for this file is too large to render.