Upload 14 files
- README.md +80 -1
- all_results.json +14 -0
- config.json +75 -0
- eval_results.json +9 -0
- generation_config.json +12 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +15 -0
- train_results.json +8 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
- vocab.json +0 -0
README.md
CHANGED
@@ -1,3 +1,82 @@
 ---
-license:
+license: apache-2.0
+tags:
+- generated_from_trainer
+model-index:
+- name: bart-base-spelling-nl
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# bart-base-spelling-nl
+
+This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base).
+
+It achieves the following results on the evaluation set:
+- Loss: 0.0276
+- Cer: 0.0147
+
+## Model description
+
+This is a text-to-text fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) trained on spelling correction. It builds on the excellent work by Oliver Guhr ([github](https://github.com/oliverguhr/spelling), [huggingface](https://huggingface.co/oliverguhr/spelling-correction-english-base)). Training was performed on a single GPU of an AWS EC2 g5.xlarge instance and took about 4 hours.
+
+## Intended uses & limitations
+
+This model is intended to be a component of the [Valkuil.net](https://valkuil.net) context-sensitive spelling checker. A future version will be trained on more data.
+
+## Training and evaluation data
+
+The model was trained on a Dutch dataset composed of 300,000 lines of text from three public Dutch sources, downloaded from the [Opus corpus](https://opus.nlpl.eu/):
+
+- nl-europarlv7.100k.txt
+- nl-opensubtitles2016.100k.txt
+- nl-wikipedia.100k.txt
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0003
+- train_batch_size: 2
+- eval_batch_size: 4
+- seed: 42
+- gradient_accumulation_steps: 16
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 2.0
+
+### Training results
+
+| Training Loss | Epoch | Step  | Validation Loss | Cer    |
+|:-------------:|:-----:|:-----:|:---------------:|:------:|
+| 0.1617        | 0.11  | 1000  | 0.0986          | 0.9241 |
+| 0.1326        | 0.21  | 2000  | 0.0676          | 0.9240 |
+| 0.09          | 0.32  | 3000  | 0.0586          | 0.9241 |
+| 0.0891        | 0.43  | 4000  | 0.0530          | 0.9240 |
+| 0.0753        | 0.54  | 5000  | 0.0491          | 0.9239 |
+| 0.069         | 0.64  | 6000  | 0.0459          | 0.9238 |
+| 0.0615        | 0.75  | 7000  | 0.0435          | 0.9238 |
+| 0.0494        | 0.86  | 8000  | 0.0409          | 0.9237 |
+| 0.0671        | 0.97  | 9000  | 0.0388          | 0.9238 |
+| 0.0425        | 1.07  | 10000 | 0.0367          | 0.9237 |
+| 0.0394        | 1.18  | 11000 | 0.0356          | 0.9237 |
+| 0.0399        | 1.29  | 12000 | 0.0344          | 0.9236 |
+| 0.0375        | 1.4   | 13000 | 0.0333          | 0.9235 |
+| 0.0409        | 1.5   | 14000 | 0.0315          | 0.9237 |
+| 0.0291        | 1.61  | 15000 | 0.0304          | 0.9236 |
+| 0.0268        | 1.72  | 16000 | 0.0293          | 0.9236 |
+| 0.0309        | 1.83  | 17000 | 0.0284          | 0.9235 |
+| 0.0362        | 1.93  | 18000 | 0.0276          | 0.9235 |
+
+### Framework versions
+
+- Transformers 4.27.3
+- Pytorch 2.0.0+cu117
+- Datasets 2.10.1
+- Tokenizers 0.13.2
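The card does not yet include a usage snippet, so here is a minimal sketch of running the uploaded checkpoint for spelling correction with the `transformers` pipeline API. It assumes the files in this commit are available in the current directory (swap `"."` for the model's Hugging Face repo id once published); the misspelled Dutch sentence is purely illustrative.

```python
from transformers import pipeline

# Load the fine-tuned BART checkpoint from the files in this commit.
# Replace "." with the model's Hugging Face repository id once it is published.
corrector = pipeline("text2text-generation", model=".")

text = "Dit is een zin met een speling fout."  # illustrative misspelled input
result = corrector(text, max_length=128, num_beams=4)
print(result[0]["generated_text"])
```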
all_results.json
ADDED
@@ -0,0 +1,14 @@
+{
+    "epoch": 2.0,
+    "eval_cer": 0.014650276279943154,
+    "eval_loss": 0.02764066681265831,
+    "eval_runtime": 1893.5756,
+    "eval_samples": 1998,
+    "eval_samples_per_second": 1.055,
+    "eval_steps_per_second": 0.264,
+    "train_loss": 0.07157236041639212,
+    "train_runtime": 20459.2275,
+    "train_samples": 297945,
+    "train_samples_per_second": 29.126,
+    "train_steps_per_second": 0.91
+}
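The `eval_cer` field is a character error rate. As an illustration of what that metric measures (not necessarily the exact implementation the training script used), the Hugging Face `evaluate` library exposes a CER metric; the prediction/reference pair below is hypothetical.

```python
import evaluate

# Character error rate: character-level edit distance divided by reference length.
cer = evaluate.load("cer")
score = cer.compute(
    predictions=["Dit is een tesst."],  # hypothetical model output
    references=["Dit is een test."],    # hypothetical gold correction
)
print(score)  # small fraction: only a couple of characters differ
```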
config.json
ADDED
@@ -0,0 +1,75 @@
+{
+  "_name_or_path": "facebook/bart-base",
+  "activation_dropout": 0.1,
+  "activation_function": "gelu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "BartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classif_dropout": 0.1,
+  "classifier_dropout": 0.0,
+  "d_model": 768,
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "early_stopping": true,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 0,
+  "forced_eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_position_embeddings": 1024,
+  "model_type": "bart",
+  "no_repeat_ngram_size": 3,
+  "normalize_before": false,
+  "normalize_embedding": true,
+  "num_beams": 4,
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "scale_embedding": false,
+  "task_specific_params": {
+    "summarization": {
+      "length_penalty": 1.0,
+      "max_length": 128,
+      "min_length": 12,
+      "num_beams": 4
+    },
+    "summarization_cnn": {
+      "length_penalty": 2.0,
+      "max_length": 142,
+      "min_length": 56,
+      "num_beams": 4
+    },
+    "summarization_xsum": {
+      "length_penalty": 1.0,
+      "max_length": 62,
+      "min_length": 11,
+      "num_beams": 6
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.27.3",
+  "use_cache": true,
+  "vocab_size": 50265
+}
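These values are inherited from `facebook/bart-base` (6 encoder and 6 decoder layers, hidden size 768, 50,265-token vocabulary). A quick sketch of inspecting them programmatically, assuming `config.json` sits in the current directory:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # reads config.json from this directory
print(config.model_type)                                              # "bart"
print(config.encoder_layers, config.decoder_layers, config.d_model)   # 6 6 768
print(config.vocab_size)                                              # 50265
```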
eval_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 2.0,
+    "eval_cer": 0.014650276279943154,
+    "eval_loss": 0.02764066681265831,
+    "eval_runtime": 1893.5756,
+    "eval_samples": 1998,
+    "eval_samples_per_second": 1.055,
+    "eval_steps_per_second": 0.264
+}
generation_config.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "early_stopping": true,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 0,
+  "forced_eos_token_id": 2,
+  "no_repeat_ngram_size": 3,
+  "num_beams": 4,
+  "pad_token_id": 1,
+  "transformers_version": "4.27.3"
+}
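These defaults (beam search with 4 beams, 3-gram repetition blocking, forced BOS/EOS tokens) are what `generate()` uses unless overridden. A minimal sketch, assuming the file sits next to the model weights in the current directory and using an illustrative input sentence:

```python
from transformers import BartForConditionalGeneration, BartTokenizer, GenerationConfig

model = BartForConditionalGeneration.from_pretrained(".")
tokenizer = BartTokenizer.from_pretrained(".")
gen_config = GenerationConfig.from_pretrained(".")  # picks up generation_config.json

inputs = tokenizer("Een zin om te corigeren.", return_tensors="pt")  # illustrative input
# num_beams=4 and no_repeat_ngram_size=3 come from the loaded generation config;
# explicit kwargs such as max_length still override it.
outputs = model.generate(**inputs, generation_config=gen_config, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```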
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80ec838026fb8064635d2b995bbdaee79908374ba00bf32c931e3f2c58ae7740
+size 557971229
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 1024,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "BartTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
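This configuration selects the standard `BartTokenizer` (byte-level BPE with GPT-2-style `vocab.json`/`merges.txt` and a `model_max_length` of 1024). A small sketch of loading it from the uploaded files, with an illustrative Dutch sentence:

```python
from transformers import AutoTokenizer

# Uses vocab.json, merges.txt, tokenizer.json and tokenizer_config.json from this directory.
tokenizer = AutoTokenizer.from_pretrained(".")
print(type(tokenizer).__name__)    # BartTokenizerFast (or BartTokenizer with use_fast=False)
print(tokenizer.model_max_length)  # 1024

ids = tokenizer("Dit is een voorbeeldzin.")["input_ids"]  # illustrative sentence
print(tokenizer.convert_ids_to_tokens(ids)[:5])
```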
train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 2.0,
+    "train_loss": 0.07157236041639212,
+    "train_runtime": 20459.2275,
+    "train_samples": 297945,
+    "train_samples_per_second": 29.126,
+    "train_steps_per_second": 0.91
+}
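As a quick sanity check (assuming `"epoch": 2.0` means two full passes over the 297,945 training samples), the reported throughput is consistent with the runtime:

```python
samples_processed = 297_945 * 2           # two epochs over the training set
train_runtime = 20_459.2275               # seconds, from train_results.json
print(samples_processed / train_runtime)  # ≈ 29.1, matching "train_samples_per_second": 29.126
```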
trainer_state.json
ADDED
The diff for this file is too large to render.
See raw diff
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57b1e317a1539f18fba060b518401be911171d875d5c3512b6969c88a588025d
+size 3707
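`training_args.bin` is a pickled `TrainingArguments` object saved by the `Trainer`. If you trust the source, it can be inspected with `torch.load`; a minimal sketch (the values should match the hyperparameters listed in the README):

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments instance;
# newer PyTorch versions may require torch.load(..., weights_only=False).
args = torch.load("training_args.bin")
print(args.learning_rate)                # expected 0.0003
print(args.per_device_train_batch_size)  # expected 2
print(args.gradient_accumulation_steps)  # expected 16
```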
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff