Training in progress, epoch 1
- added_tokens.json +52 -0
- config.json +36 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +67 -0
- tokenizer.json +0 -0
- tokenizer_config.json +15 -0
- training_args.bin +3 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "[PAD_left_10]": 50274,
+  "[PAD_left_11]": 50275,
+  "[PAD_left_12]": 50276,
+  "[PAD_left_13]": 50277,
+  "[PAD_left_14]": 50278,
+  "[PAD_left_15]": 50279,
+  "[PAD_left_16]": 50280,
+  "[PAD_left_17]": 50281,
+  "[PAD_left_18]": 50282,
+  "[PAD_left_19]": 50283,
+  "[PAD_left_1]": 50265,
+  "[PAD_left_20]": 50284,
+  "[PAD_left_21]": 50285,
+  "[PAD_left_22]": 50286,
+  "[PAD_left_23]": 50287,
+  "[PAD_left_24]": 50288,
+  "[PAD_left_25]": 50289,
+  "[PAD_left_26]": 50290,
+  "[PAD_left_27]": 50291,
+  "[PAD_left_28]": 50292,
+  "[PAD_left_29]": 50293,
+  "[PAD_left_2]": 50266,
+  "[PAD_left_30]": 50294,
+  "[PAD_left_31]": 50295,
+  "[PAD_left_32]": 50296,
+  "[PAD_left_33]": 50297,
+  "[PAD_left_34]": 50298,
+  "[PAD_left_35]": 50299,
+  "[PAD_left_36]": 50300,
+  "[PAD_left_37]": 50301,
+  "[PAD_left_38]": 50302,
+  "[PAD_left_39]": 50303,
+  "[PAD_left_3]": 50267,
+  "[PAD_left_40]": 50304,
+  "[PAD_left_41]": 50305,
+  "[PAD_left_42]": 50306,
+  "[PAD_left_43]": 50307,
+  "[PAD_left_44]": 50308,
+  "[PAD_left_45]": 50309,
+  "[PAD_left_46]": 50310,
+  "[PAD_left_47]": 50311,
+  "[PAD_left_48]": 50312,
+  "[PAD_left_49]": 50313,
+  "[PAD_left_4]": 50268,
+  "[PAD_left_50]": 50314,
+  "[PAD_left_5]": 50269,
+  "[PAD_left_6]": 50270,
+  "[PAD_left_7]": 50271,
+  "[PAD_left_8]": 50272,
+  "[PAD_left_9]": 50273
+}
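These 50 `[PAD_left_*]` entries sit directly above roberta-base's stock vocabulary of 50,265 tokens, which is why config.json below reports `vocab_size: 50315`. A minimal sketch of how such a file is typically produced (an assumption; the actual training script is not part of this commit):

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoModelForSequenceClassification.from_pretrained("roberta-base", num_labels=2)

# New special tokens are appended after the stock vocab (ids 0..50264),
# so [PAD_left_1] lands on 50265 and [PAD_left_50] on 50314, as above.
pad_tokens = [f"[PAD_left_{i}]" for i in range(1, 51)]
tokenizer.add_special_tokens({"additional_special_tokens": pad_tokens})

# Grow the embedding matrix to match the enlarged vocab (50315 rows).
model.resize_token_embeddings(len(tokenizer))
```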
config.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "_name_or_path": "roberta-base",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "NEGATIVE",
+    "1": "POSITIVE"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "NEGATIVE": 0,
+    "POSITIVE": 1
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.33.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50315
+}
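The config describes a stock roberta-base encoder (12 layers, 12 heads, hidden size 768) with a binary NEGATIVE/POSITIVE classification head and the enlarged 50,315-token vocabulary. A hedged usage sketch; `user/checkpoint` is a placeholder for this repository's id:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("user/checkpoint")  # placeholder id
model = AutoModelForSequenceClassification.from_pretrained("user/checkpoint")

inputs = tokenizer("a thoroughly enjoyable film", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # "NEGATIVE" or "POSITIVE"
```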
merges.txt
ADDED
The diff for this file is too large to render.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75abdcee92169b386d635f1cf425928a5a9315cc9d8eb125aa11f08e73cfdd16
+size 498811249
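This is a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file and `size` its byte count (~499 MB, consistent with a float32 roberta-base checkpoint plus classifier head). A sketch of fetching the resolved file, assuming the repo lives on the Hugging Face Hub (`user/checkpoint` is a placeholder):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and returns the local path of the
# 498,811,249-byte weight file from the Hub's cache.
path = hf_hub_download(repo_id="user/checkpoint", filename="pytorch_model.bin")
```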
special_tokens_map.json
ADDED
@@ -0,0 +1,67 @@
+{
+  "additional_special_tokens": [
+    "[PAD_left_1]",
+    "[PAD_left_2]",
+    "[PAD_left_3]",
+    "[PAD_left_4]",
+    "[PAD_left_5]",
+    "[PAD_left_6]",
+    "[PAD_left_7]",
+    "[PAD_left_8]",
+    "[PAD_left_9]",
+    "[PAD_left_10]",
+    "[PAD_left_11]",
+    "[PAD_left_12]",
+    "[PAD_left_13]",
+    "[PAD_left_14]",
+    "[PAD_left_15]",
+    "[PAD_left_16]",
+    "[PAD_left_17]",
+    "[PAD_left_18]",
+    "[PAD_left_19]",
+    "[PAD_left_20]",
+    "[PAD_left_21]",
+    "[PAD_left_22]",
+    "[PAD_left_23]",
+    "[PAD_left_24]",
+    "[PAD_left_25]",
+    "[PAD_left_26]",
+    "[PAD_left_27]",
+    "[PAD_left_28]",
+    "[PAD_left_29]",
+    "[PAD_left_30]",
+    "[PAD_left_31]",
+    "[PAD_left_32]",
+    "[PAD_left_33]",
+    "[PAD_left_34]",
+    "[PAD_left_35]",
+    "[PAD_left_36]",
+    "[PAD_left_37]",
+    "[PAD_left_38]",
+    "[PAD_left_39]",
+    "[PAD_left_40]",
+    "[PAD_left_41]",
+    "[PAD_left_42]",
+    "[PAD_left_43]",
+    "[PAD_left_44]",
+    "[PAD_left_45]",
+    "[PAD_left_46]",
+    "[PAD_left_47]",
+    "[PAD_left_48]",
+    "[PAD_left_49]",
+    "[PAD_left_50]"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
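Loading through `AutoTokenizer` picks up tokenizer_config.json, special_tokens_map.json, and added_tokens.json together. A quick consistency sketch (`user/checkpoint` again a placeholder for this repo's id):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/checkpoint")  # placeholder id
assert tok.model_max_length == 512
# additional_special_tokens are atomic: each maps to the single id
# recorded in added_tokens.json and is never split by BPE.
assert tok.convert_tokens_to_ids("[PAD_left_1]") == 50265
assert tok("[PAD_left_50]", add_special_tokens=False).input_ids == [50314]
```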
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b41460318a3c17471e106324dd3791e0d1a2a8b4a5be90371c3e8255f46dccdb
+size 4091
vocab.json
ADDED
The diff for this file is too large to render.