jon-tow committed
Commit b687348
1 Parent(s): 16584b7

test: gpt2 tokenizer conversion

Files changed (5)
  1. merges.txt +0 -0
  2. special_tokens_map.json +39 -0
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +42 -0
  5. vocab.json +0 -0
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|fim_prefix|>",
+     "<|fim_middle|>",
+     "<|fim_suffix|>",
+     "<|fim_pad|>",
+     "<gh_stars>",
+     "<filename>",
+     "<issue_start>",
+     "<issue_comment>",
+     "<issue_closed>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<empty_output>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<commit_after>",
+     "<reponame>",
+     "<|endofprompt|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|pause|>",
+     "<|reg0|>",
+     "<|reg1|>",
+     "<|reg2|>",
+     "<|reg3|>",
+     "<|reg4|>",
+     "<|reg5|>",
+     "<|reg6|>",
+     "<|reg7|>",
+     "<|extra0|>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
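
Since the commit message labels this a conversion test, a quick structural check of the map above might look like the following (an illustrative sketch, not part of the commit; it assumes the file sits in the current directory):

import json

# Read the special-tokens map added by this commit.
with open("special_tokens_map.json") as f:
    smap = json.load(f)

# The diff above declares 32 additional special tokens, and bos/eos/unk
# all alias <|endoftext|>, as in the stock GPT-2 tokenizer.
assert len(smap["additional_special_tokens"]) == 32
assert smap["bos_token"] == smap["eos_token"] == smap["unk_token"] == "<|endoftext|>"
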
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_prefix_space": false,
+   "additional_special_tokens": [
+     "<|endoftext|>",
+     "<|fim_prefix|>",
+     "<|fim_middle|>",
+     "<|fim_suffix|>",
+     "<|fim_pad|>",
+     "<gh_stars>",
+     "<filename>",
+     "<issue_start>",
+     "<issue_comment>",
+     "<issue_closed>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<empty_output>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<commit_after>",
+     "<reponame>",
+     "<|endofprompt|>",
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|pause|>",
+     "<|reg0|>",
+     "<|reg1|>",
+     "<|reg2|>",
+     "<|reg3|>",
+     "<|reg4|>",
+     "<|reg5|>",
+     "<|reg6|>",
+     "<|reg7|>",
+     "<|extra0|>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
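
With all five files in place, a minimal end-to-end check of the conversion could look like this (a sketch, not part of the commit; it assumes the files above live in the current directory and that the transformers library is installed):

from transformers import AutoTokenizer

# AutoTokenizer resolves to GPT2Tokenizer via the "tokenizer_class"
# field in tokenizer_config.json and loads vocab.json/merges.txt.
tok = AutoTokenizer.from_pretrained(".")

# Round-trip an ordinary string: encode then decode should recover it.
text = "def hello(): pass"
assert tok.decode(tok.encode(text)) == text

# Each special token declared above should map to exactly one id.
for token in ["<|endoftext|>", "<|fim_prefix|>", "<|im_start|>", "<|extra0|>"]:
    ids = tok.encode(token)
    assert len(ids) == 1, (token, ids)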