Upload 8 files
- CECorrelationEvaluator_sts-dev_results.csv +37 -0
- added_tokens.json +3 -0
- config.json +33 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +15 -0
- unigram.json +0 -0
CECorrelationEvaluator_sts-dev_results.csv
ADDED
@@ -0,0 +1,37 @@
+epoch,steps,Pearson_Correlation,Spearman_Correlation
+0,60,0.8108205119430657,0.6775500232174001
+0,120,0.8126328070054158,0.719739962898035
+0,180,0.8197494756490264,0.7367440528613669
+0,240,0.8143788514683798,0.7722194666358607
+0,300,0.8554703528954702,0.7797734174875973
+0,360,0.8867136080137896,0.7966013813933611
+0,420,0.8900547052794784,0.7931161548937005
+0,480,0.9072449185540117,0.8219332520164013
+0,-1,0.8886268436096315,0.8120410355148108
+1,60,0.914324981727741,0.8304331840280006
+1,120,0.9106096783837855,0.8318911846496169
+1,180,0.9044580315593339,0.8224347349147864
+1,240,0.8944994256988071,0.819628399980797
+1,300,0.9211216051233405,0.8410314162144492
+1,360,0.9119642456328265,0.844677202361708
+1,420,0.9214798722699171,0.8450922495802143
+1,480,0.927514325902071,0.8537073105305835
+1,-1,0.9285561628938842,0.8530424809877916
+2,60,0.9270909200316917,0.8524662719447608
+2,120,0.9322281426535477,0.8629911818302881
+2,180,0.933861917234177,0.8558616087839614
+2,240,0.9262076193768929,0.8578525231268871
+2,300,0.9378297997045202,0.8680296726322818
+2,360,0.9314410012663609,0.8665722314198852
+2,420,0.9355678698602042,0.8673909056711858
+2,480,0.9353656798854632,0.8708261286979041
+2,-1,0.9370043250789034,0.8710440544331869
+3,60,0.9300760916829612,0.8645785960123589
+3,120,0.9363664381437894,0.8689902810893334
+3,180,0.935638413275817,0.8672896061109865
+3,240,0.9344740222477245,0.8666020158105195
+3,300,0.9357739782686892,0.8704978849799261
+3,360,0.9380924454725607,0.8720309226726812
+3,420,0.9385245749842436,0.871922223492038
+3,480,0.9362827784888671,0.8718493338330381
+3,-1,0.9360742942528038,0.8718174291678207
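The CSV above logs Pearson and Spearman correlations on the sts-dev split at each evaluation step, with steps = -1 marking the end-of-epoch run. A minimal sketch for picking the strongest checkpoint from this log, assuming pandas is installed and the file sits in the working directory:

```python
import pandas as pd

# Evaluator output written during cross-encoder training.
df = pd.read_csv("CECorrelationEvaluator_sts-dev_results.csv")

# steps == -1 rows are end-of-epoch evaluations.
best = df.loc[df["Spearman_Correlation"].idxmax()]
print(best)  # here: epoch 3, steps 360, Spearman ~0.8720
```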
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "<mask>": 128000
+}
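added_tokens.json registers `<mask>` at id 128000, i.e. the last slot of the 128001-token vocabulary declared in config.json below. A quick sanity check, assuming the repository has been downloaded locally (the path is a placeholder):

```python
from transformers import AutoTokenizer

# "path/to/this-repo" is a placeholder for a local clone of these files.
tok = AutoTokenizer.from_pretrained("path/to/this-repo")

# The added <mask> token should occupy the final vocabulary id.
assert tok.convert_tokens_to_ids("<mask>") == 128000
```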
config.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "_name_or_path": "sdadas/polish-roberta-large-v2",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.32.0.dev0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 128001
+}
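config.json describes a 24-layer RoBERTa-large sequence classifier with a single output label, i.e. a cross-encoder head that emits one similarity score per sentence pair. A minimal usage sketch with sentence-transformers, assuming the files are available locally (the path and example sentences are placeholders):

```python
from sentence_transformers import CrossEncoder

# Placeholder path to a local copy of this repository.
model = CrossEncoder("path/to/this-repo", max_length=512)

# num_labels == 1, so predict() returns one float per sentence pair.
scores = model.predict([("Kot śpi na kanapie.", "Na kanapie śpi kot.")])
print(scores)
```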
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f00eb23a6fa7f8f902edc3a266171e6545be101c7bd5867967f25fae3f90718
+size 1739985329
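pytorch_model.bin is stored as a Git LFS pointer; the oid and size fields above are enough to verify a downloaded copy of the weights. A minimal integrity check in Python:

```python
import hashlib

EXPECTED = "6f00eb23a6fa7f8f902edc3a266171e6545be101c7bd5867967f25fae3f90718"

# Hash the ~1.7 GB weight file in chunks to keep memory use flat.
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
```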
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
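tokenizer_config.json declares a RobertaTokenizer with the usual `<s>`/`</s>` special tokens, so sentence pairs are packed in RoBERTa's `<s> A </s></s> B </s>` layout before being scored. A quick smoke test, again assuming a local copy of the repository (path and sentences are placeholders):

```python
from transformers import AutoTokenizer

# Placeholder path to a local clone of these files.
tok = AutoTokenizer.from_pretrained("path/to/this-repo")

enc = tok("Zdanie pierwsze.", "Zdanie drugie.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # <s> ... </s></s> ... </s>
```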
unigram.json
ADDED
The diff for this file is too large to render.