Training in progress, epoch 1
- .gitattributes +2 -0
- config.json +11 -13
- pytorch_model.bin +2 -2
- special_tokens_map.json +13 -5
- tokenizer.json +0 -0
- tokenizer_config.json +24 -10
- training_args.bin +1 -1
- unigram.json +3 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+unigram.json filter=lfs diff=lfs merge=lfs -text
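The two added lines extend the existing LFS rules so that the new tokenizer.json and unigram.json are stored as LFS pointers instead of plain git blobs. As a rough illustration (not part of the commit; .gitattributes uses gitignore-style matching, approximated here with fnmatch, and the file names are assumptions):

```python
# Rough sketch: which paths would the LFS patterns visible above capture?
# The pattern list is only the hunk shown here; the real file has more rules.
from fnmatch import fnmatch

lfs_patterns = ["*.zip", "*.zst", "*tfevents*", "tokenizer.json", "unigram.json"]

for path in ["tokenizer.json", "unigram.json", "README.md"]:
    tracked = any(fnmatch(path, pat) for pat in lfs_patterns)
    print(f"{path}: {'LFS' if tracked else 'plain git'}")
```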
config.json CHANGED
@@ -1,11 +1,12 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "classla/xlm-roberta-base-multilingual-text-genre-classifier",
   "architectures": [
-    "
+    "BertForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
   "classifier_dropout": null,
-  "
+  "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -47,21 +48,18 @@
     "LABEL_8": 8,
     "LABEL_9": 9
   },
-  "layer_norm_eps": 1e-
-  "max_position_embeddings":
-  "model_type": "
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "
+  "output_past": true,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
-  "summary_activation": "gelu",
-  "summary_last_dropout": 0.1,
-  "summary_type": "first",
-  "summary_use_proj": true,
   "torch_dtype": "float32",
   "transformers_version": "4.33.2",
-  "type_vocab_size":
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size":
+  "vocab_size": 250002
 }
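The updated config describes a 12-layer, 768-wide BERT-style sequence classifier with ten labels (LABEL_0 through LABEL_9) and an XLM-R-sized vocabulary of 250002; _name_or_path points at classla/xlm-roberta-base-multilingual-text-genre-classifier. A minimal sketch, assuming the updated config.json has been fetched into the working directory, that loads it and instantiates the matching model skeleton:

```python
# Minimal sketch (not from the repo): load the updated config.json and build
# the matching randomly initialised model to sanity-check its dimensions.
from transformers import BertConfig, BertForSequenceClassification

config = BertConfig.from_json_file("config.json")

# Values taken from the diff above.
assert config.model_type == "bert"
assert config.num_hidden_layers == 12 and config.hidden_size == 768
assert config.vocab_size == 250002   # XLM-R-sized embedding table
assert config.num_labels == 10       # LABEL_0 .. LABEL_9

# The trained weights live in pytorch_model.bin; this is just the skeleton.
model = BertForSequenceClassification(config)
print(model.classifier)  # Linear(in_features=768, out_features=10, bias=True)
```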
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7500683afb79ec99a9a76d3957c89b7b6b68e6100cdb19ea461109454c31c21c
+size 1112292209
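Only the LFS pointer lives in git: the oid line records the SHA-256 of the actual weights file (about 1.1 GB per the size line) and size its byte length. A minimal sketch (function name and paths are illustrative, not from the repo) for checking a downloaded blob against its pointer:

```python
# Verify that a downloaded blob matches the oid/size in its LFS pointer file.
# Names and paths are illustrative.
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    with open(pointer_path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if " " in line)
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    digest, actual_size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size
```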
special_tokens_map.json CHANGED
@@ -1,7 +1,15 @@
 {
-  "
-  "
-  "
-  "
-
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
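The rewritten map switches to RoBERTa/XLM-R-style special tokens: <s> doubles as BOS and CLS, </s> as EOS and SEP, and <mask> is declared with AddedToken options ("lstrip": true lets the mask token absorb the space before it). A small sketch, assuming the updated file is local, that handles both entry styles:

```python
# Minimal sketch: list each special token's surface form from the updated map.
import json

with open("special_tokens_map.json") as f:
    token_map = json.load(f)

for name, spec in token_map.items():
    # Entries are either a bare string or a dict carrying AddedToken options
    # (here only mask_token does, e.g. "lstrip": true).
    content = spec["content"] if isinstance(spec, dict) else spec
    print(f"{name}: {content}")
```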
tokenizer.json CHANGED
The diff for this file is too large to render.
tokenizer_config.json CHANGED
@@ -1,15 +1,29 @@
 {
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "cls_token": "
-  "
-  "
-  "mask_token":
-
-
-
-
-
+  "cls_token": "<s>",
+  "do_lower_case": false,
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "max_length": 512,
+  "model_max_length": 512,
+  "pad_to_multiple_of": null,
+  "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "</s>",
+  "stride": 0,
+  "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
-  "
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>"
 }
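Besides mirroring the special tokens, the new config pins the sequence budget (model_max_length: 512) and right-side padding and truncation. A minimal sketch of how those settings behave at encode time, assuming the repo's tokenizer.json is available locally; it loads via PreTrainedTokenizerFast here even though the config names BertTokenizer:

```python
# Minimal sketch, assuming tokenizer.json from this repo sits locally.
# Keyword values mirror the updated tokenizer_config.json above.
from transformers import PreTrainedTokenizerFast

tokenizer = PreTrainedTokenizerFast(
    tokenizer_file="tokenizer.json",
    model_max_length=512,
    padding_side="right",
    truncation_side="right",
    bos_token="<s>", eos_token="</s>", cls_token="<s>", sep_token="</s>",
    pad_token="<pad>", unk_token="<unk>", mask_token="<mask>",
)

# Longer-than-512 inputs are cut from the right ("truncation_side": "right"),
# and the shorter sequence in the batch is padded on the right.
batch = tokenizer(["a short text", "another slightly longer text"],
                  padding=True, truncation=True)
print(len(batch["input_ids"][0]), len(batch["input_ids"][1]))
```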
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:05dc416114fd2fd2e592e458c1628350ad11ea9eb365c70557ef47b4aac67780
 size 4091
unigram.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71b44701d7efd054205115acfa6ef126c5d2f84bd3affe0c59e48163674d19a6
+size 14763234
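unigram.json is new in this commit and, at roughly 14 MB, is stored through LFS like the weights. Assuming it follows the common serialisation of a SentencePiece-style Unigram tokenizer model, i.e. a "vocab" list of [piece, log-probability] pairs (an assumption; the diff only shows the pointer), a quick inspection could look like:

```python
# Sketch under an assumed layout: unigram.json as {"vocab": [[piece, logp], ...]}.
import json

with open("unigram.json") as f:
    unigram = json.load(f)

vocab = unigram.get("vocab", [])
print(f"{len(vocab)} pieces")
for piece, log_prob in vocab[:5]:
    print(piece, log_prob)
```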