RyanHaniff committed
Commit efd7105
1 Parent(s): a260293

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +10 -0
  2. tokenizer_config.json +28 -0
  3. vocab.txt +0 -0
special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "additional_special_tokens": [
+     "[EMPTY]"
+   ],
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
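
This file declares the tokenizer's special tokens, including the TAPAS-specific `[EMPTY]` token used for empty table cells. A minimal sketch of how `transformers` surfaces these entries, assuming the library is installed and this commit's files sit in the current directory (the local path `"."` is an assumption for illustration):

```python
# Minimal sketch: load the uploaded tokenizer files and inspect the special
# tokens declared in special_tokens_map.json. Assumes `transformers` is
# installed and this repo's files are in the current directory.
from transformers import TapasTokenizer

tokenizer = TapasTokenizer.from_pretrained(".")

print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token)  # [CLS] [SEP] [PAD]
print(tokenizer.additional_special_tokens)        # ['[EMPTY]']
print(tokenizer.convert_tokens_to_ids("[MASK]"))  # id resolved against vocab.txt
```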
tokenizer_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     "[EMPTY]"
+   ],
+   "cell_trim_length": -1,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "drop_rows_to_fit": false,
+   "empty_token": "[EMPTY]",
+   "mask_token": "[MASK]",
+   "max_column_id": null,
+   "max_question_length": null,
+   "max_row_id": null,
+   "min_question_length": null,
+   "model_max_length": 512,
+   "name_or_path": "google/tapas-base",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": "C:\\Users\\rhani/.cache\\huggingface\\hub\\models--google--tapas-base\\snapshots\\00456266840bb0a319cd6748ebf7da3caf98816b\\special_tokens_map.json",
+   "strip_accents": null,
+   "strip_column_names": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "TapasTokenizer",
+   "unk_token": "[UNK]",
+   "update_answer_coordinates": false
+ }
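
This config pins the `TapasTokenizer` settings: lowercasing enabled, a `model_max_length` of 512, and the base vocabulary inherited from `google/tapas-base`. A minimal sketch of encoding a table plus question under these settings, assuming `transformers` and `pandas` are installed; the table and question below are made-up illustration data, not from this repo:

```python
# Minimal sketch: encode a table + question with the settings above
# (do_lower_case=True, model_max_length=512). Assumes `transformers`
# and `pandas` are installed and the repo files are in the current directory.
import pandas as pd
from transformers import TapasTokenizer

tokenizer = TapasTokenizer.from_pretrained(".")

# TAPAS expects every table cell as a string, plus one or more queries.
table = pd.DataFrame({"City": ["Paris", "Tokyo"],
                      "Population": ["2,100,000", "14,000,000"]})
inputs = tokenizer(table=table,
                   queries=["Which city has the larger population?"],
                   padding="max_length",
                   truncation=True)

print(len(inputs["input_ids"][0]))  # 512, per model_max_length above
```

With `truncation=True`, the tokenizer drops table rows that would not fit within the 512-token budget, which is the standard truncation behavior for TAPAS-style table inputs.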
vocab.txt ADDED
The diff for this file is too large to render.