Javeria98 committed
Commit c3b3726 · Parent: 7a03443

End of training
README.md ADDED
@@ -0,0 +1,49 @@
+ ---
+ license: mit
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ model-index:
+ - name: Donut_undocumented_1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Donut_undocumented_1
+
+ This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 25
+
+ ### Framework versions
+
+ - Transformers 4.30.0.dev0
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.12.0
+ - Tokenizers 0.13.3
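For reference, a minimal inference sketch against this checkpoint (not part of the commit itself). The repo id `Javeria98/Donut_undocumented_1` is an assumption pieced together from the committer name and the model name, and the task prompt `<s_HEADER>` is only a guess taken from the tags added below; the actual prompt used in training is not recorded here.

```python
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Assumed repo id (committer username + model name); adjust if the model
# lives elsewhere.
repo_id = "Javeria98/Donut_undocumented_1"
processor = DonutProcessor.from_pretrained(repo_id)
model = VisionEncoderDecoderModel.from_pretrained(repo_id)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()

image = Image.open("prescription.png").convert("RGB")  # placeholder input path
pixel_values = processor(image, return_tensors="pt").pixel_values.to(device)

# Donut decodes from a task start token; "<s_HEADER>" (one of the tags in
# added_tokens.json below) is an assumption, not a documented prompt.
decoder_input_ids = processor.tokenizer(
    "<s_HEADER>", add_special_tokens=False, return_tensors="pt"
).input_ids.to(device)

outputs = model.generate(
    pixel_values,
    decoder_input_ids=decoder_input_ids,
    max_length=512,
    pad_token_id=processor.tokenizer.pad_token_id,
    eos_token_id=processor.tokenizer.eos_token_id,
)
print(processor.batch_decode(outputs)[0])
```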
added_tokens.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "</s_AGE>": 57544,
+   "</s_BP>": 57542,
+   "</s_Block1>": 57540,
+   "</s_Block2>": 57534,
+   "</s_Block3>": 57548,
+   "</s_Block4>": 57550,
+   "</s_DIAGNOSIS>": 57532,
+   "</s_GENDER>": 57552,
+   "</s_HEADER>": 57530,
+   "</s_MEDICINE_NAME>": 57538,
+   "</s_MEDICINE_POWER>": 57536,
+   "</s_NAME>": 57528,
+   "</s_Name>": 57554,
+   "</s_TEMP>": 57546,
+   "</s_WEIGHT>": 57526,
+   "<s_AGE>": 57543,
+   "<s_BP>": 57541,
+   "<s_Block1>": 57539,
+   "<s_Block2>": 57533,
+   "<s_Block3>": 57547,
+   "<s_Block4>": 57549,
+   "<s_DIAGNOSIS>": 57531,
+   "<s_GENDER>": 57551,
+   "<s_HEADER>": 57529,
+   "<s_MEDICINE_NAME>": 57537,
+   "<s_MEDICINE_POWER>": 57535,
+   "<s_NAME>": 57527,
+   "<s_Name>": 57553,
+   "<s_TEMP>": 57545,
+   "<s_WEIGHT>": 57525,
+   "<s_iitcdip>": 57523,
+   "<s_synthdog>": 57524,
+   "<sep/>": 57522
+ }
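These are the field tags appended to the base vocabulary for this fine-tune (`<sep/>`, `<s_iitcdip>`, and `<s_synthdog>`, ids 57522-57524, already ship with donut-base). Note that both `NAME` and `Name` variants are present, which looks like a labeling inconsistency worth checking. Below is a hedged sketch of how such tags are typically registered before training; the tag list and its order are read off the ids above (57525 upward, two per field), but the surrounding code is an assumption, not the author's script.

```python
from transformers import DonutProcessor

# Sketch: register the opening/closing tag pair for each annotated field so
# every tag maps to a single token id.
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")

# Field order reconstructed from the ids in added_tokens.json above.
fields = [
    "WEIGHT", "NAME", "HEADER", "DIAGNOSIS", "Block2", "MEDICINE_POWER",
    "MEDICINE_NAME", "Block1", "BP", "AGE", "TEMP", "Block3", "Block4",
    "GENDER", "Name",
]
tags = [t for f in fields for t in (f"<s_{f}>", f"</s_{f}>")]

processor.tokenizer.add_special_tokens({"additional_special_tokens": tags})
# The decoder's embedding matrix must then grow to cover the new ids, e.g.:
# model.decoder.resize_token_embeddings(len(processor.tokenizer))
```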
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.30.0.dev0"
+ }
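The ids follow the XLM-RoBERTa convention used by the Donut decoder (`<s>` = 0, `<pad>` = 1, `</s>` = 2). As a sketch, the file above deserializes into an equivalent `GenerationConfig`:

```python
from transformers import GenerationConfig

# Equivalent in-code construction of the config above (sketch).
gen_config = GenerationConfig(
    bos_token_id=0,         # <s>
    pad_token_id=1,         # <pad>
    eos_token_id=2,         # </s>
    forced_eos_token_id=2,  # always end generation with </s>
)
# Picked up implicitly by model.generate(...), or passed explicitly:
# model.generate(pixel_values, generation_config=gen_config)
```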
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "do_align_long_axis": false,
+   "do_normalize": true,
+   "do_pad": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "do_thumbnail": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "DonutImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "processor_class": "DonutProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": [
+     720,
+     960
+   ]
+ }
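With these settings the processor resizes and thumbnails each page to fit the target canvas and pads it to exactly that size (in `DonutImageProcessor`'s legacy list form, `size` is `(width, height)`, so 720 wide by 960 tall; `resample: 2` is PIL's bilinear filter), then rescales by `rescale_factor` = 1/255 and normalizes with per-channel mean and std 0.5, mapping every value into [-1, 1]. A sketch of just the rescale-and-normalize arithmetic, not the library internals:

```python
import numpy as np

def rescale_and_normalize(pixels: np.ndarray) -> np.ndarray:
    """uint8 channel values -> floats in [-1, 1], per the config above."""
    x = pixels.astype(np.float32) * 0.00392156862745098  # rescale_factor = 1/255
    return (x - 0.5) / 0.5                               # image_mean = image_std = 0.5

# Black maps to -1.0 and white to 1.0:
print(rescale_and_normalize(np.array([0, 255], dtype=np.uint8)))  # [-1.  1.]
```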
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:748bcb928444782616385a3477346aa341ef3705729e2466f058e4b57c8fe924
+ oid sha256:71f391e7410812886f67bc4802092def8edc6c500fda771510bf7bbe45cf7c9c
  size 809301785
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb9e3dce4c326195d08fc3dd0f7e2eee1da8595c847bf4c1a9c78b7a82d47e2d
+ size 1296245
special_tokens_map.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "additional_special_tokens": [
+     "<s_WEIGHT>",
+     "</s_WEIGHT>",
+     "<s_NAME>",
+     "</s_NAME>",
+     "<s_HEADER>",
+     "</s_HEADER>",
+     "<s_DIAGNOSIS>",
+     "</s_DIAGNOSIS>",
+     "<s_Block2>",
+     "</s_Block2>",
+     "<s_MEDICINE_POWER>",
+     "</s_MEDICINE_POWER>",
+     "<s_MEDICINE_NAME>",
+     "</s_MEDICINE_NAME>",
+     "<s_Block1>",
+     "</s_Block1>",
+     "<s_BP>",
+     "</s_BP>",
+     "<s_AGE>",
+     "</s_AGE>",
+     "<s_TEMP>",
+     "</s_TEMP>",
+     "<s_Block3>",
+     "</s_Block3>",
+     "<s_Block4>",
+     "</s_Block4>",
+     "<s_GENDER>",
+     "</s_GENDER>",
+     "<s_Name>",
+     "</s_Name>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
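These same tags, registered as `additional_special_tokens`, are what make the model's flat output sequence parseable back into structured fields. A hedged sketch using `DonutProcessor.token2json` (a standard `DonutProcessor` method); the repo id is the same assumption as above and the example sequence is invented:

```python
from transformers import DonutProcessor

# Assumed repo id; see the inference sketch near the top of this page.
processor = DonutProcessor.from_pretrained("Javeria98/Donut_undocumented_1")

# An invented output sequence built from the tags above:
sequence = "<s_NAME>John Doe</s_NAME><s_AGE>42</s_AGE><s_BP>120/80</s_BP>"
print(processor.token2json(sequence))
# -> {'NAME': 'John Doe', 'AGE': '42', 'BP': '120/80'}
```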
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "processor_class": "DonutProcessor",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
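The eye-catching `model_max_length` is not a real limit: it is the `VERY_LARGE_INTEGER` sentinel transformers writes when no maximum length is recorded for a tokenizer, i.e. `int(1e30)` after float rounding. A quick check, as a sketch:

```python
# 1e30 is a Python float; the nearest double to 10**30 is exactly the
# sentinel value stored in tokenizer_config.json above.
assert int(1e30) == 1000000000000000019884624838656

# In practice the decoder's max position embeddings bound the sequence, so a
# concrete max_length is passed at generate() time instead (see the inference
# sketch near the top of this page).
```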
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec2a10a7b58708e7834c92ebc1844edd938a7e1f9076c09bc5cd2008a3a36b17
+ oid sha256:b453a48e0fc327170e35cbca69969deedb22c0dc1f7e0b1e5977467a2374d345
  size 4155