nrshoudi committed
Commit adee72b · Parent: c97f0a9

Upload tokenizer

Files changed (3)
  1. added_tokens.json      +2 -2
  2. tokenizer_config.json  +18 -17
  3. vocab.json             +38 -36
added_tokens.json CHANGED

```diff
@@ -1,4 +1,4 @@
 {
-  "</s>": 44,
-  "<s>": 43
+  "</s>": 46,
+  "<s>": 45
 }
```
tokenizer_config.json CHANGED

```diff
@@ -1,22 +1,22 @@
 {
   "added_tokens_decoder": {
-    "41": {
+    "43": {
       "content": "[UNK]",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": true,
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
-    "42": {
+    "44": {
       "content": "[PAD]",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": true,
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
-    "43": {
+    "45": {
       "content": "<s>",
       "lstrip": false,
       "normalized": true,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "44": {
+    "46": {
       "content": "</s>",
       "lstrip": false,
       "normalized": true,
@@ -35,14 +35,15 @@
   },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "do_lower_case": false,
+  "do_phonemize": false,
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
+  "phone_delimiter_token": " ",
+  "phonemizer_backend": "espeak",
+  "phonemizer_lang": "en-us",
   "processor_class": "Wav2Vec2Processor",
-  "replace_word_delimiter_char": " ",
-  "target_lang": null,
-  "tokenizer_class": "Wav2Vec2CTCTokenizer",
+  "tokenizer_class": "Wav2Vec2PhonemeCTCTokenizer",
   "unk_token": "[UNK]",
-  "word_delimiter_token": "|"
+  "word_delimiter_token": null
 }
```
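The config now points at `Wav2Vec2PhonemeCTCTokenizer` with `"do_phonemize": false`, so inputs are expected to already be space-separated phone/character sequences rather than raw text. A minimal sketch of loading and using the updated tokenizer, assuming the `transformers` library and using a placeholder repository id (the actual Hub repo for this commit is not shown here):

```python
from transformers import Wav2Vec2PhonemeCTCTokenizer

# Placeholder repo id; substitute the Hub repository this commit belongs to.
tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("nrshoudi/<repo>")

# With do_phonemize = false, no espeak call is made at encode time: the string is
# split on phone_delimiter_token (" ") and each piece is looked up in vocab.json.
ids = tokenizer("م ر ح ب ا").input_ids
print(ids)
print(tokenizer.decode(ids))
```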
vocab.json CHANGED

```diff
@@ -1,7 +1,7 @@
 {
-  "[PAD]": 42,
-  "[UNK]": 41,
-  "|": 0,
+  " ": 0,
+  "[PAD]": 44,
+  "[UNK]": 43,
   "ء": 1,
   "آ": 2,
   "أ": 3,
@@ -9,37 +9,39 @@
   "إ": 5,
   "ئ": 6,
   "ا": 7,
-  "ب": 8,
-  "ة": 9,
-  "ت": 10,
-  "ث": 11,
-  "ج": 12,
-  "ح": 13,
-  "خ": 14,
-  "د": 15,
-  "ذ": 16,
-  "ر": 17,
-  "ز": 18,
-  "س": 19,
-  "ش": 20,
-  "ص": 21,
-  "ض": 22,
-  "ط": 23,
-  "ظ": 24,
-  "ع": 25,
-  "غ": 26,
-  "ف": 27,
-  "ق": 28,
-  "ك": 29,
-  "ل": 30,
-  "م": 31,
-  "ن": 32,
-  "ه": 33,
-  "و": 34,
-  "ى": 35,
-  "ي": 36,
-  "ً": 37,
-  "چ": 38,
-  "ک": 39,
-  "ی": 40
+  "اً": 8,
+  "ب": 9,
+  "ة": 10,
+  "ت": 11,
+  "ث": 12,
+  "ج": 13,
+  "ح": 14,
+  "خ": 15,
+  "د": 16,
+  "ذ": 17,
+  "ر": 18,
+  "ز": 19,
+  "س": 20,
+  "ش": 21,
+  "ص": 22,
+  "ض": 23,
+  "ط": 24,
+  "ظ": 25,
+  "ع": 26,
+  "غ": 27,
+  "ف": 28,
+  "ق": 29,
+  "ك": 30,
+  "ل": 31,
+  "لا": 32,
+  "م": 33,
+  "ن": 34,
+  "ه": 35,
+  "و": 36,
+  "ى": 37,
+  "ي": 38,
+  "چ": 39,
+  "ڨ": 40,
+  "ک": 41,
+  "ی": 42
 }
```
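After this change the ids stay contiguous across the two files: 43 phone/character entries (ids 0-42) plus "[UNK]" = 43 and "[PAD]" = 44 in vocab.json, with "<s>" = 45 and "</s>" = 46 carried in added_tokens.json. A quick sanity-check sketch, assuming local copies of the two uploaded files in the working directory:

```python
import json

# Load the uploaded vocabulary and added-token maps.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open("added_tokens.json", encoding="utf-8") as f:
    added = json.load(f)

# All ids, across both files, should form one contiguous range starting at 0.
ids = sorted(vocab.values()) + sorted(added.values())
assert ids == list(range(len(ids))), "token ids are not contiguous"
print(f"{len(vocab)} vocab entries + {len(added)} added tokens = {len(ids)} ids (0..{len(ids) - 1})")
```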