zenoda committed
Commit 649e257
Parent: 90da16f

Upload 5 files

preprocessor_config.json CHANGED
@@ -1,19 +1,28 @@
 {
-  "crop_size": 224,
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
   "do_center_crop": false,
   "do_normalize": true,
+  "do_rescale": true,
   "do_resize": true,
-  "feature_extractor_type": "DeiTFeatureExtractor",
   "image_mean": [
     0.5,
     0.5,
     0.5
   ],
+  "image_processor_type": "DeiTImageProcessor",
   "image_std": [
     0.5,
     0.5,
     0.5
   ],
+  "processor_class": "TrOCRProcessor",
   "resample": 3,
-  "size": 384
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 384,
+    "width": 384
+  }
 }
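Beyond the new keys, this change tracks the transformers library's migration from feature extractors to image processors: the deprecated "feature_extractor_type" is replaced by "image_processor_type", and the scalar "size"/"crop_size" values become explicit height/width dicts. The added "rescale_factor" of 0.00392156862745098 is exactly 1/255, i.e. pixel values are scaled from [0, 255] to [0, 1] before normalization. A minimal sketch of inspecting the new config; "path/to/checkout" is a placeholder for a local clone, since the repository id is not shown in this commit:

# Hedged sketch: load the updated image-processor config and confirm the
# rescaling behavior. "path/to/checkout" is a placeholder, not the real repo.
from transformers import DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("path/to/checkout")
print(processor.rescale_factor)   # 0.00392156862745098 == 1 / 255
print(processor.size)             # {'height': 384, 'width': 384}
print(processor.do_center_crop)   # False, so crop_size is not applied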
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f5e2fefcf793761a76a6bfb8ad35489f9c203b25557673284b6d032f41043f4
+size 1356293
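These three lines are a Git LFS pointer, not the model itself: the repository records only the SHA-256 and byte count (about 1.3 MB), while the actual SentencePiece model is stored in LFS.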
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "processor_class": "TrOCRProcessor",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
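Taken together: tokenizer_config.json selects XLMRobertaTokenizer, backed by the uploaded sentencepiece.bpe.model (tokenizer.json is the prebuilt fast-tokenizer equivalent); special_tokens_map.json supplies the <s>/</s>/<mask> special tokens; and the "processor_class" entries in both configs tie everything to TrOCRProcessor. The huge "model_max_length" is the library's sentinel for "no explicit length limit" (int(1e30) after float rounding). A hedged sketch of loading the combined processor; "user/trocr-model" stands in for this repository's real id, which is not shown in the commit:

# Sketch only: "user/trocr-model" is a placeholder repo id.
from transformers import TrOCRProcessor

processor = TrOCRProcessor.from_pretrained("user/trocr-model")
print(type(processor.tokenizer).__name__)   # XLMRobertaTokenizerFast (built from tokenizer.json)
print(processor.tokenizer.mask_token)       # <mask>
print(processor.tokenizer.sep_token)        # </s>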