codingwithlewis committed
Commit 8e112c2
Parent(s): 9375028

Upload tokenizer
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -23,7 +23,7 @@
   },
   {
     "id": 2,
-    "content": "</s>",
+    "content": "<|im_end|>",
     "single_word": false,
     "lstrip": false,
     "rstrip": false,
@@ -137,7 +137,7 @@
     "vocab": {
       "<unk>": 0,
       "<s>": 1,
-      "</s>": 2,
+      "<|im_end|>": 2,
       "<0x00>": 3,
       "<0x01>": 4,
       "<0x02>": 5,
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
-size 493443
+oid sha256:cc460a0129515b7579ec9f63218012601729de4fbd1b5de8d56dc47e8a204a29
+size 493449
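The sentencepiece binary is tracked through Git LFS, so only its pointer changes here; the 6-byte size growth (493443 → 493449) is consistent with swapping the 4-byte piece `</s>` for the 10-byte `<|im_end|>`. Together with the tokenizer.json edit above, token id 2 should now round-trip through the new marker. A hedged check (same placeholder repo id as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("user/model")  # hypothetical placeholder

# Id 2 was </s>; after this commit it should resolve to the ChatML marker.
print(tokenizer.convert_ids_to_tokens(2))             # expected: <|im_end|>
print(tokenizer.convert_tokens_to_ids("<|im_end|>"))  # expected: 2
```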
tokenizer_config.json CHANGED
@@ -1,7 +1,6 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
-  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -20,7 +19,7 @@
     "special": true
   },
   "2": {
-    "content": "</s>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -29,14 +28,11 @@
     }
   },
   "bos_token": "<s>",
+  "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}{% else %}{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
-  "legacy": true,
-  "model_max_length": 32768,
+  "eos_token": "<|im_end|>",
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<unk>",
-  "padding_side": "left",
-  "sp_model_kwargs": {},
-  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false