Tags: Transformers · Polish · tokenizer · fast-tokenizer · polish · Inference Endpoints
pkedzia committed
Commit fa45eb4
1 Parent(s): f2f7101

Upload 5 files

Files changed (5)
  1. merges.txt +0 -0
  2. special_tokens_map.json +10 -0
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +14 -0
  5. vocab.json +0 -0
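Taken together, these five files form a complete GPT-2 style byte-level BPE tokenizer: vocab.json and merges.txt hold the BPE vocabulary and merge rules, tokenizer.json is the fast (Rust-backed) serialization, and the two JSON configs declare the special tokens and tokenizer class. A minimal sketch of pulling the files from the Hub; the repository id is not shown in this commit, so "pkedzia/your-repo" below is only a placeholder:

```python
from huggingface_hub import snapshot_download

# Placeholder repo id -- the commit page does not show the actual repository name.
local_dir = snapshot_download(repo_id="pkedzia/your-repo")
print(local_dir)  # local path containing merges.txt, vocab.json, tokenizer.json, ...
```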
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "additional_special_tokens": [
+     "bos_token",
+     "eos_token",
+     "unk_token"
+   ],
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
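As in the stock GPT-2 tokenizer, all three special tokens map to the single <|endoftext|> string. A quick sketch to verify the map after loading; "./tokenizer_dir" is a placeholder for a local copy of the uploaded files (for example the snapshot directory from the sketch above):

```python
from transformers import AutoTokenizer

# "./tokenizer_dir" is a placeholder for a local copy of the five uploaded files.
tokenizer = AutoTokenizer.from_pretrained("./tokenizer_dir")

# bos, eos and unk all resolve to <|endoftext|>, as declared in special_tokens_map.json.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
print(tokenizer.special_tokens_map)
```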
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "add_prefix_space": false,
+   "additional_special_tokens": [
+     "bos_token",
+     "eos_token",
+     "unk_token"
+   ],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
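The config declares a GPT2Tokenizer (byte-level BPE) with a model_max_length of 1024 and add_prefix_space disabled. A short usage sketch with a Polish sentence, reusing the placeholder directory from above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./tokenizer_dir")  # placeholder path

# Byte-level BPE encoding of a Polish sentence; truncation is bounded by
# model_max_length = 1024 from tokenizer_config.json.
enc = tokenizer("Litwo! Ojczyzno moja! ty jesteś jak zdrowie.", truncation=True)
print(enc["input_ids"])
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(tokenizer.decode(enc["input_ids"]))
```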
vocab.json ADDED
The diff for this file is too large to render. See raw diff