antypasd committed
Commit: 9a33aa8
Parent(s): e99229e

Upload tokenizer

Files changed (3):
  1. merges.txt +1 -1
  2. tokenizer.json +1 -0
  3. tokenizer_config.json +1 -2
merges.txt CHANGED
@@ -1,4 +1,4 @@
- #version: 0.2 - Trained by `huggingface/tokenizers`
+ #version: 0.2
  Ġ t
  Ġ a
  h e
tokenizer.json CHANGED
@@ -87,6 +87,7 @@
  "continuing_subword_prefix": "",
  "end_of_word_suffix": "",
  "fuse_unk": false,
+ "byte_fallback": false,
  "vocab": {
  "<s>": 0,
  "<pad>": 1,
tokenizer_config.json CHANGED
@@ -1,15 +1,14 @@
  {
  "add_prefix_space": false,
  "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 512,
- "name_or_path": "cardiffnlp/twitter-roberta-base-2022-154m",
  "pad_token": "<pad>",
  "sep_token": "</s>",
- "special_tokens_map_file": null,
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"