smcleod committed on
Commit cbb3f4a · 1 Parent(s): 2b9e5cc

Upload 8 files

converted_tokenizer/special_tokens_map.json CHANGED
@@ -1,5 +1,30 @@
  {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
-  "pad_token": "<|endoftext|>"
- }
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  }
+ }
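This change expands each special token from a bare string into a full added-token record (content, lstrip, rstrip, normalized, single_word) and adds an unk_token. The sketch below is illustrative only (not part of the commit): it shows how these fields correspond to `AddedToken` objects in the Hugging Face `transformers` API. The local path `converted_tokenizer` is an assumption about where this repo's folder is checked out.

```python
# Illustrative sketch, not part of this commit.
# Assumes the repo's converted_tokenizer/ directory is available locally.
from transformers import AutoTokenizer, AddedToken

tokenizer = AutoTokenizer.from_pretrained("converted_tokenizer")

# Every special token in the new special_tokens_map.json carries the same flags:
# lstrip/rstrip=True strip surrounding whitespace, normalized=False keeps the
# literal "<|endoftext|>" string, single_word=False allows it to match mid-text.
endoftext = AddedToken(
    "<|endoftext|>", lstrip=True, rstrip=True, normalized=False, single_word=False
)
tokenizer.add_special_tokens(
    {"bos_token": endoftext, "eos_token": endoftext,
     "pad_token": endoftext, "unk_token": endoftext}
)
print(tokenizer.special_tokens_map)
```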
converted_tokenizer/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
converted_tokenizer/tokenizer_config.json CHANGED
@@ -774,7 +774,9 @@
   "chat_template": "{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'user') %}{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|><|im_start|>assistant<|im_sep|>'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>'}}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "model_max_length": 16384,
   "pad_token": "<|endoftext|>",
-  "tokenizer_class": "GPT2Tokenizer"
- }
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+ }
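With this change, tokenizer_config.json also declares an unk_token and an empty extra_special_tokens map alongside the existing chat_template. A minimal sketch (again not part of the commit, and again assuming the `converted_tokenizer` path) of loading the config and rendering a conversation through that template:

```python
# Illustrative sketch, not part of this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("converted_tokenizer")
print(tokenizer.unk_token)         # "<|endoftext|>" after this change
print(tokenizer.model_max_length)  # 16384

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# The template wraps each turn in <|im_start|>...<|im_end|> and, after the last
# user turn, leaves the prompt open at <|im_start|>assistant<|im_sep|>.
print(tokenizer.apply_chat_template(messages, tokenize=False))
```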
converted_tokenizer/vocab.json CHANGED
The diff for this file is too large to render. See raw diff