codebyzeb committed on
Commit
06ad2ac
1 Parent(s): e80712e

Upload tokenizer

Files changed (3)
  1. tokenizer.json +95 -51
  2. tokenizer_config.json +1 -1
  3. vocab.json +1 -1
tokenizer.json CHANGED
@@ -22,7 +22,7 @@
       "special": true
     },
     {
-      "id": 5,
+      "id": 3,
       "content": "UTT_BOUNDARY",
       "single_word": false,
       "lstrip": false,
@@ -34,13 +34,6 @@
   "normalizer": {
     "type": "Sequence",
     "normalizers": [
-      {
-        "type": "Replace",
-        "pattern": {
-          "String": "\n"
-        },
-        "content": " UTT_BOUNDARY"
-      },
       {
         "type": "Strip",
         "strip_left": true,
@@ -51,55 +44,106 @@
   "pre_tokenizer": {
     "type": "Whitespace"
   },
-  "post_processor": null,
+  "post_processor": {
+    "type": "TemplateProcessing",
+    "single": [
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      }
+    ],
+    "pair": [
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      },
+      {
+        "SpecialToken": {
+          "id": "UTT_BOUNDARY",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "B",
+          "type_id": 1
+        }
+      }
+    ],
+    "special_tokens": {
+      "UTT_BOUNDARY": {
+        "id": "UTT_BOUNDARY",
+        "ids": [
+          3
+        ],
+        "tokens": [
+          "UTT_BOUNDARY"
+        ]
+      }
+    }
+  },
   "decoder": null,
   "model": {
     "type": "WordLevel",
     "vocab": {
       "UNK": 0,
       "PAD": 1,
-      "BOS": 2,
-      "EOS": 3,
-      "WORD_BOUNDARY": 4,
-      "UTT_BOUNDARY": 5,
-      "": 6,
-      "a": 7,
-      "ɾ": 8,
-      "k": 9,
-      "t̠ʃ": 10,
-      "i": 11,
-      "s̺": 12,
-      "l": 13,
-      "p": 14,
-      "o": 15,
-      "r": 16,
-      "aɪ": 17,
-      "n": 18,
-      "m": 19,
-      "ð": 20,
-      "e": 21,
-      "ts̻": 22,
-      "β": 23,
-      "s̻": 24,
-      "ʎ": 25,
-      "b": 26,
-      "aʊ": 27,
-      "t": 28,
-      "ɣ": 29,
-      "ɡ": 30,
-      "c": 31,
-      "u": 32,
-      "eɪ": 33,
-      "d": 34,
-      "ts̺": 35,
-      "j": 36,
-      "ɲ": 37,
-      "f": 38,
-      "ʃ": 39,
-      "ɟ": 40,
-      "eʊ": 41,
-      "θ": 42,
-      "x": 43
+      "WORD_BOUNDARY": 2,
+      "UTT_BOUNDARY": 3,
+      "": 4,
+      "a": 5,
+      "ɾ": 6,
+      "k": 7,
+      "t̠ʃ": 8,
+      "i": 9,
+      "s̺": 10,
+      "l": 11,
+      "p": 12,
+      "o": 13,
+      "r": 14,
+      "aɪ": 15,
+      "n": 16,
+      "m": 17,
+      "ð": 18,
+      "e": 19,
+      "ts̻": 20,
+      "β": 21,
+      "s̻": 22,
+      "ʎ": 23,
+      "b": 24,
+      "aʊ": 25,
+      "t": 26,
+      "ɣ": 27,
+      "ɡ": 28,
+      "c": 29,
+      "u": 30,
+      "eɪ": 31,
+      "d": 32,
+      "ts̺": 33,
+      "j": 34,
+      "ɲ": 35,
+      "f": 36,
+      "ʃ": 37,
+      "ɟ": 38,
+      "eʊ": 39,
+      "θ": 40,
+      "x": 41
     },
     "unk_token": "UNK"
   }
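
In effect, this commit removes the Replace normalizer that rewrote newlines to " UTT_BOUNDARY" and instead registers a TemplateProcessing post-processor that prepends UTT_BOUNDARY (now id 3) to every encoded sequence. A minimal sketch of the resulting behaviour, assuming the updated tokenizer.json is in the working directory and the `tokenizers` package is installed (the phoneme string is purely illustrative):

```python
# Sketch only: assumes the updated tokenizer.json from this commit is in the
# current directory and that the `tokenizers` package is installed.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# The WordLevel model plus Whitespace pre-tokenizer expects space-separated
# phonemes; "a m a" is just an arbitrary example drawn from the vocab.
enc = tok.encode("a m a")

# TemplateProcessing prepends UTT_BOUNDARY instead of the old newline rewrite.
print(enc.tokens)  # expected: ['UTT_BOUNDARY', 'a', 'm', 'a']
print(enc.ids)     # expected: [3, 5, 17, 5] under the renumbered vocab
```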
tokenizer_config.json CHANGED
@@ -17,7 +17,7 @@
       "single_word": false,
       "special": true
     },
-    "5": {
+    "3": {
       "content": "UTT_BOUNDARY",
       "lstrip": false,
       "normalized": false,
vocab.json CHANGED
@@ -1 +1 @@
-{"UNK":0,"PAD":1,"BOS":2,"EOS":3,"WORD_BOUNDARY":4,"UTT_BOUNDARY":5,"":6,"a":7,"ɾ":8,"k":9,"t̠ʃ":10,"i":11,"s̺":12,"l":13,"p":14,"o":15,"r":16,"aɪ":17,"n":18,"m":19,"ð":20,"e":21,"ts̻":22,"β":23,"s̻":24,"ʎ":25,"b":26,"aʊ":27,"t":28,"ɣ":29,"ɡ":30,"c":31,"u":32,"eɪ":33,"d":34,"ts̺":35,"j":36,"ɲ":37,"f":38,"ʃ":39,"ɟ":40,"eʊ":41,"θ":42,"x":43}
+{"UNK":0,"PAD":1,"WORD_BOUNDARY":2,"UTT_BOUNDARY":3,"":4,"a":5,"ɾ":6,"k":7,"t̠ʃ":8,"i":9,"s̺":10,"l":11,"p":12,"o":13,"r":14,"aɪ":15,"n":16,"m":17,"ð":18,"e":19,"ts̻":20,"β":21,"s̻":22,"ʎ":23,"b":24,"aʊ":25,"t":26,"ɣ":27,"ɡ":28,"c":29,"u":30,"eɪ":31,"d":32,"ts̺":33,"j":34,"ɲ":35,"f":36,"ʃ":37,"ɟ":38,"eʊ":39,"θ":40,"x":41}