truong9499 committed
Commit
763be8b
1 Parent(s): 2723e13

Upload tokenizer

Files changed (4)
  1. emoji.json +0 -0
  2. special_tokens_map.json +4 -7
  3. tokenizer_config.json +9 -15
  4. vocab.txt +0 -0
emoji.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -1,9 +1,6 @@
  {
- "bos_token": "<s>",
- "cls_token": "[CLS]",
- "eos_token": "</s>",
- "mask_token": "[MASK]",
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
- "unk_token": "<unk>"
+ "bos_token": "<|startoftext|>",
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>",
+ "unk_token": "<|endoftext|>"
  }
tokenizer_config.json CHANGED
@@ -1,17 +1,11 @@
  {
- "additional_special_tokens": [],
- "bos_token": "<s>",
- "cls_token": "[CLS]",
- "do_lower_case": true,
- "eos_token": "</s>",
- "extra_ids": 0,
- "mask_token": "[MASK]",
- "model_max_length": 512,
- "name_or_path": "rinna/japanese-gpt2-medium",
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
- "sp_model_kwargs": {},
- "special_tokens_map_file": "/root/.cache/huggingface/hub/models--rinna--japanese-gpt2-medium/snapshots/f464b76739c884d8b0479a0a7705b7fa71c3fd5a/special_tokens_map.json",
- "tokenizer_class": "T5Tokenizer",
- "unk_token": "<unk>"
+ "bos_token": "<|startoftext|>",
+ "do_clean_text": false,
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 256,
+ "name_or_path": "abeja/gpt-neox-japanese-2.7b",
+ "pad_token": "<|endoftext|>",
+ "special_tokens_map_file": null,
+ "tokenizer_class": "GPTNeoXJapaneseTokenizer",
+ "unk_token": "<|endoftext|>"
  }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
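
Note: the diff swaps the tokenizer base from rinna/japanese-gpt2-medium (T5Tokenizer) to abeja/gpt-neox-japanese-2.7b (GPTNeoXJapaneseTokenizer), replacing the [CLS]/[SEP]/[MASK]-style special tokens with <|startoftext|> and <|endoftext|> and lowering model_max_length from 512 to 256. A minimal sketch of how the uploaded files could be loaded and sanity-checked is below; the local path "." is an assumption standing in for a checkout of this repository, not something specified by the commit.

    # Hypothetical check, assuming the repo is checked out in the current directory.
    from transformers import AutoTokenizer

    # tokenizer_config.json selects GPTNeoXJapaneseTokenizer, which reads vocab.txt and emoji.json.
    tokenizer = AutoTokenizer.from_pretrained(".")

    # Special tokens as set by the new special_tokens_map.json / tokenizer_config.json:
    print(tokenizer.bos_token)         # <|startoftext|>
    print(tokenizer.eos_token)         # <|endoftext|>
    print(tokenizer.pad_token)         # <|endoftext|>
    print(tokenizer.unk_token)         # <|endoftext|>
    print(tokenizer.model_max_length)  # 256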