lim4349 committed on
Commit a810c49
Parent: ee4c475

Upload tokenizer

Files changed (4)
  1. special_tokens_map.json +9 -0
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +19 -0
  4. vocab.txt +0 -0
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
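
These entries map BERT-style markers onto the bos/eos slots ([CLS] doubles as bos_token, [SEP] as eos_token), consistent with the klue/roberta-large tokenizer referenced in tokenizer_config.json below. A minimal sketch of how the uploaded files can be loaded and inspected with transformers; "path/to/this/repo" is a placeholder for a local checkout of this repository:

# Sketch only, not part of this commit: load the uploaded tokenizer files and
# check the special tokens defined in special_tokens_map.json above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
print(tokenizer.cls_token, tokenizer.sep_token)     # [CLS] [SEP]
print(tokenizer.bos_token == tokenizer.cls_token)   # True, per the mapping above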
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "from_tf": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "name_or_path": "/opt/ml/ray_results/_objective_2023-01-03_10-02-38/_objective_c119e_00000_0_learning_rate=0.0000,num_train_epochs=5,per_device_train_batch_size=16_2023-01-03_10-02-38/saved_models/klue/roberta-large/mrc_LSG_01-03-19-01/run-c119e_00000/checkpoint-21785",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": "/opt/ml/.cache/huggingface/hub/models--klue--roberta-large/snapshots/5193b95701189160c45d02a1033a4ea55bdbe259/special_tokens_map.json",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
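
This configuration selects BertTokenizer (WordPiece, no lowercasing) with model_max_length 512; name_or_path and special_tokens_map_file simply record where the fine-tuning checkpoint and the klue/roberta-large special-token map lived during training. A minimal sketch of the effect of these settings, again using "path/to/this/repo" as a placeholder path:

# Sketch only, not part of this commit: the config above yields a BERT-style
# WordPiece tokenizer (the fast variant when tokenizer.json is present) that
# wraps inputs with [CLS]/[SEP] and truncates to model_max_length (512).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
ids = tok("안녕하세요", truncation=True)["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # e.g. ['[CLS]', ..., '[SEP]']
print(tok.model_max_length)            # 512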
vocab.txt ADDED
The diff for this file is too large to render. See raw diff