commit files to HF hub
- config.json +30 -0
- deberta_result.log +23 -0
- dict.txt +0 -0
- pytorch_model.bin +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +15 -0
- tokenizer_config.json +21 -0
- vi_deberta_base_checkpoint_1.pt +3 -0
- vi_deberta_base_checkpoint_2.pt +3 -0
- vi_deberta_base_checkpoint_3.pt +3 -0
- vi_deberta_base_checkpoint_4.pt +3 -0
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "architectures": [
+    "DebertaV2ForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 512,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": null,
+  "position_biased_input": true,
+  "relative_attention": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.21.2",
+  "type_vocab_size": 0,
+  "vocab_size": 40030
+}
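The config describes a base-size DeBERTa-v2 (12 layers, 12 attention heads, hidden size 768) with a 40,030-entry vocabulary. Note that relative_attention is false and position_biased_input is true, so this checkpoint runs with absolute position embeddings rather than DeBERTa's disentangled relative attention. A minimal loading sketch with transformers; the repo id below is a placeholder, not the actual hub path:

from transformers import AutoConfig, AutoModelForMaskedLM

repo = "user/vi-deberta-base"  # placeholder repo id, not the actual hub path
config = AutoConfig.from_pretrained(repo)
print(config.model_type, config.num_hidden_layers, config.vocab_size)  # deberta-v2 12 40030

model = AutoModelForMaskedLM.from_pretrained(repo)  # instantiates DebertaV2ForMaskedLM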
deberta_result.log
ADDED
@@ -0,0 +1,23 @@
+Before load dataset, RAM used: 0.36 GB | Available: 200.44 GB | Left: 200.08 GB
+After load dataset, RAM used: 0.61 GB | Available: 200.42 GB | Left: 199.80 GB
+After Prepare Dataloader, RAM used: 6.94 GB | Available: 203.21 GB | Left: 196.27 GB
+After epoch 1, RAM used: 27.96 GB | Available: 206.69 GB | Left: 178.73 GB
+
+>>> Epoch 1: Perplexity: 8.71756492102604 Loss: 1.987068991905017
+Loss improved inf -> 1.987068991905017
+Saved training checkpoint
+After epoch 2, RAM used: 32.61 GB | Available: 217.10 GB | Left: 184.49 GB
+
+>>> Epoch 2: Perplexity: 6.334900481327126 Loss: 1.7609998571673966
+Loss improved 1.987068991905017 -> 1.7609998571673966
+Saved training checkpoint
+After epoch 3, RAM used: 32.60 GB | Available: 220.86 GB | Left: 188.26 GB
+
+>>> Epoch 3: Perplexity: 5.783759349968259 Loss: 1.714263437903529
+Loss improved 1.7609998571673966 -> 1.714263437903529
+Saved training checkpoint
+After epoch 4, RAM used: 32.33 GB | Available: 192.09 GB | Left: 159.77 GB
+
+>>> Epoch 4: Perplexity: 4.530374708434244 Loss: 1.5004713556098934
+Loss improved 1.714263437903529 -> 1.5004713556098934
+Saved training checkpoint
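Two regularities in the log are worth noting: the "Left" column is simply Available minus Used, and checkpointing follows the usual keep-the-best pattern, saving whenever the epoch loss beats the previous best (starting from inf). A hypothetical reconstruction of the logging, assuming psutil for the memory lines; the actual training script is not part of this commit, and the logged perplexities are not exactly exp of the logged losses (they were presumably computed over different token sets), so the sketch shows only the conventional exp(cross-entropy) relation:

import math
import psutil  # assumption: the RAM lines look like psutil.virtual_memory() output

def log_ram(stage: str) -> None:
    vm = psutil.virtual_memory()
    used, avail = vm.used / 2**30, vm.available / 2**30
    print(f"{stage}, RAM used: {used:.2f} GB | Available: {avail:.2f} GB | Left: {avail - used:.2f} GB")

# Losses copied from the log above; the real script computed them per epoch.
epoch_losses = [1.987068991905017, 1.7609998571673966, 1.714263437903529, 1.5004713556098934]
best_loss = float("inf")
for epoch, loss in enumerate(epoch_losses, start=1):
    log_ram(f"After epoch {epoch}")
    print(f"\n>>> Epoch {epoch}: Perplexity: {math.exp(loss)} Loss: {loss}")
    if loss < best_loss:
        print(f"Loss improved {best_loss} -> {loss}")
        best_loss = loss
        print("Saved training checkpoint")  # the real script would call torch.save here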
dict.txt
ADDED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4055da3ac6a0a8e5518e75bf98eb16ed89392f54dbf0e284405431f62c3d1276
+size 467369837
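The large binaries in this commit (this file, sentencepiece.bpe.model, and the four .pt checkpoints) are stored as Git LFS pointers: a spec version line, the sha256 of the real blob, and its size in bytes. Here 467,369,837 bytes is about 0.44 GB, consistent with roughly 117M float32 parameters at 4 bytes each. A small sketch parsing such a pointer:

# Sketch: parse a Git LFS pointer file such as pytorch_model.bin above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size_bytes": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:4055da3ac6a0a8e5518e75bf98eb16ed89392f54dbf0e284405431f62c3d1276\n"
    "size 467369837\n"
)
info = parse_lfs_pointer(pointer)
print(f"{info['size_bytes'] / 2**30:.2f} GB")  # 0.44 GB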
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
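These are RoBERTa/BART-style special tokens (<s> doubles as BOS and CLS, </s> as EOS and SEP), and their ids line up with config.json (bos_token_id=0, pad_token_id=1, eos_token_id=2). The "lstrip": true on <mask> means whitespace immediately before the mask token is absorbed into it, so masked text should tokenize the same with or without a preceding space. A hedged check, again with a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/vi-deberta-base")  # placeholder repo id
print(tok.bos_token_id, tok.pad_token_id, tok.eos_token_id)  # expected: 0 1 2
# With lstrip=True the space before <mask> is absorbed into the token,
# so both variants should produce identical ids.
a = tok("thủ đô của <mask>.")["input_ids"]
b = tok("thủ đô của<mask>.")["input_ids"]
print(a == b)  # expected: True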
tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1024,
+  "name_or_path": "vinai/bartpho-syllable",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "special_tokens_map_file": null,
+  "tokenizer_class": "BartphoTokenizer",
+  "unk_token": "<unk>"
+}
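The name_or_path shows the tokenizer was taken from vinai/bartpho-syllable, so this DeBERTa model reuses BARTpho's syllable-level Vietnamese SentencePiece tokenizer (hence sentencepiece.bpe.model and dict.txt above; BartphoTokenizer requires the sentencepiece package). Also note model_max_length is 1024 (inherited from BARTpho) while config.json sets max_position_embeddings to 512, so inputs should be truncated to 512 tokens in practice. A minimal usage sketch, with a placeholder repo id and illustrative output only:

from transformers import AutoModelForMaskedLM, AutoTokenizer, pipeline

repo = "user/vi-deberta-base"  # placeholder repo id, not the actual hub path
tok = AutoTokenizer.from_pretrained(repo)  # resolves to BartphoTokenizer
model = AutoModelForMaskedLM.from_pretrained(repo)

fill = pipeline("fill-mask", model=model, tokenizer=tok)
for pred in fill("Hà Nội là thủ đô của <mask>."):  # "Hanoi is the capital of <mask>."
    print(pred["token_str"], round(pred["score"], 4))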
vi_deberta_base_checkpoint_1.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b95eb38bd4284fe8e903d597f9ee4ed4dd3727e7f72d1d7d47f465eecc6670d6
+size 467404461
vi_deberta_base_checkpoint_2.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4db46a06d68e91dae13800e2fa2c9040bf6328f683c5c996cf10f60397c4337
+size 467404525
vi_deberta_base_checkpoint_3.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3c1bf31db23b9de01ee09b25f9e1deb0412725236e06adc20283254790a0b46
+size 467404525
vi_deberta_base_checkpoint_4.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8fe8ce12c371a2f3a9b5d14452cc50c2c86138f2f7004339e3e67a61c53ee8a
+size 467404525
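Each per-epoch .pt checkpoint is slightly larger than pytorch_model.bin (467,404,525 vs 467,369,837 bytes), which suggests it carries a little state beyond the bare weights; the exact contents aren't documented in this commit. A hedged inspection sketch:

import torch

# The checkpoint structure is an assumption, since the training script is not
# part of this commit. On newer torch versions you may need weights_only=False.
state = torch.load("vi_deberta_base_checkpoint_4.pt", map_location="cpu")
print(type(state))
if isinstance(state, dict):
    for key in list(state)[:10]:  # peek at the first few entries
        print(key)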