simondg committed on
Commit af74633
1 Parent(s): a0e9fd5

2nd version + model cards addition

Files changed (5)
  1. README.md +21 -0
  2. config.json +8 -6
  3. pytorch_model.bin +2 -2
  4. tokenizer_config.json +1 -1
  5. training_args.bin +1 -1
README.md ADDED
@@ -0,0 +1,21 @@
+ # ByT5 Dutch OCR correction
+
+ This model is a fine-tuned ByT5 model that corrects OCR mistakes found in Dutch sentences. The [google/byt5-base](https://huggingface.co/google/byt5-base) model is fine-tuned on the Dutch section of the [OSCAR](https://huggingface.co/datasets/oscar) dataset.
+
+
+ ## Usage
+
+ ```python
+ from transformers import AutoTokenizer, T5ForConditionalGeneration
+
+ example_sentence = "Een algoritme dat op basis van kunstmatige inte11i9entie vkijwe1 geautomatiseerd een Nederlandstalige tekst samenstelt."
+
+ tokenizer = AutoTokenizer.from_pretrained('ml6team/byt5-small-dutch-ocr-correction')
+
+ model_inputs = tokenizer(example_sentence, max_length=128, truncation=True, return_tensors="pt")
+
+ model = T5ForConditionalGeneration.from_pretrained('ml6team/byt5-small-dutch-ocr-correction')
+ outputs = model.generate(**model_inputs, max_length=128)
+
+ tokenizer.decode(outputs[0])
+ ```
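A note on the snippet above: `tokenizer.decode(outputs[0])` returns the raw string including the trailing `</s>` token. The sketch below, assuming the same `ml6team/byt5-small-dutch-ocr-correction` repo id, decodes with `skip_special_tokens=True` and handles a batch of sentences; `correct_ocr` is a hypothetical helper, not part of the model card.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_name = 'ml6team/byt5-small-dutch-ocr-correction'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

def correct_ocr(sentences, max_length=128):
    # Hypothetical helper: batch-correct OCR-garbled Dutch sentences.
    inputs = tokenizer(sentences, max_length=max_length, truncation=True,
                       padding=True, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=max_length)
    # skip_special_tokens=True drops <pad> and the trailing </s>
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)

print(correct_ocr(["Een algoritme dat op basis van kunstmatige inte11i9entie vkijwe1 geautomatiseerd een Nederlandstalige tekst samenstelt."]))
```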
config.json CHANGED
@@ -1,11 +1,11 @@
  {
- "_name_or_path": "google/byt5-small",
+ "_name_or_path": "google/byt5-base",
  "architectures": [
  "T5ForConditionalGeneration"
  ],
- "d_ff": 3584,
+ "d_ff": 3968,
  "d_kv": 64,
- "d_model": 1472,
+ "d_model": 1536,
  "decoder_start_token_id": 0,
  "dropout_rate": 0.1,
  "eos_token_id": 1,
@@ -14,10 +14,12 @@
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
  "layer_norm_epsilon": 1e-06,
+ "max_length": 128,
  "model_type": "t5",
- "num_decoder_layers": 4,
- "num_heads": 6,
- "num_layers": 12,
+ "num_decoder_layers": 6,
+ "num_heads": 12,
+ "num_layers": 18,
+ "output_past": true,
  "pad_token_id": 0,
  "relative_attention_num_buckets": 32,
  "tie_word_embeddings": false,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2f9fdf11711ba5274f54d5648662b8ce92fbd0db1292c4e4b5cdaafc448e59a
- size 1198627501
+ oid sha256:27f964d9bdf1133ea53987a2309ea0a1aa395c8469a14ea157524c9a326f1f14
+ size 2326726857
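The checkpoint roughly doubles in size, consistent with the small → base switch. A back-of-the-envelope check, assuming fp32 storage at about 4 bytes per parameter and negligible serialization overhead:

```python
# Parameter counts inferred from the LFS pointer sizes in this diff
print(2326726857 / 4 / 1e6)  # ~581.7M params -> byt5-base (~582M)
print(1198627501 / 4 / 1e6)  # ~299.7M params -> byt5-small (~300M)
```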
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "extra_ids": 125, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>", "<extra_id_100>", "<extra_id_101>", "<extra_id_102>", "<extra_id_103>", "<extra_id_104>", "<extra_id_105>", "<extra_id_106>", "<extra_id_107>", "<extra_id_108>", "<extra_id_109>", "<extra_id_110>", "<extra_id_111>", "<extra_id_112>", "<extra_id_113>", "<extra_id_114>", "<extra_id_115>", "<extra_id_116>", "<extra_id_117>", "<extra_id_118>", "<extra_id_119>", "<extra_id_120>", "<extra_id_121>", "<extra_id_122>", "<extra_id_123>", "<extra_id_124>"], "special_tokens_map_file": "/home/jupyter/.cache/huggingface/transformers/3715c3de7f5eda8f5c9274c55ea8e8632fe6e4cdfdf8e9066ae173b956cd9b80.063895353d5ef9e19a25220cb616c43abc5e84a2f11b1ffb71c29e097572a109", "tokenizer_file": null, "name_or_path": "google/byt5-small", "tokenizer_class": "ByT5Tokenizer"}
+ {"eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "extra_ids": 125, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>", "<extra_id_100>", "<extra_id_101>", "<extra_id_102>", "<extra_id_103>", "<extra_id_104>", "<extra_id_105>", "<extra_id_106>", "<extra_id_107>", "<extra_id_108>", "<extra_id_109>", "<extra_id_110>", "<extra_id_111>", "<extra_id_112>", "<extra_id_113>", "<extra_id_114>", "<extra_id_115>", "<extra_id_116>", "<extra_id_117>", "<extra_id_118>", "<extra_id_119>", "<extra_id_120>", "<extra_id_121>", "<extra_id_122>", "<extra_id_123>", "<extra_id_124>"], "max_length": 128, "special_tokens_map_file": "/home/jupyter/.cache/huggingface/transformers/f22e687c418a9ed3e651ca340d6a5880bbb312bc24bbe893f5cd47288891b89d.063895353d5ef9e19a25220cb616c43abc5e84a2f11b1ffb71c29e097572a109", "tokenizer_file": null, "name_or_path": "google/byt5-base", "tokenizer_class": "ByT5Tokenizer"}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3e2f55c734f8bf61615c7f29f6bcf32b44d99e6e8b7b5d9dd96d470107c35f6
+ oid sha256:f8b7c3480a155bfb2b1502ff18aa4ae2e2e6456fef905f4c4cf747423c8947df
  size 2671