mwz committed
Commit 8b96f73
1 Parent(s): 950147b

Upload tokenizer

Files changed (3)
  1. added_tokens.json +2 -2
  2. tokenizer_config.json +6 -7
  3. vocab.json +69 -81
added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "</s>": 83,
-  "<s>": 82
+  "</s>": 71,
+  "<s>": 70
 }
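As a quick sanity check (a minimal sketch, assuming the uploaded added_tokens.json is available in the working directory), the remapped sentence-boundary ids can be verified directly from the file:

```python
import json

# Read the uploaded added_tokens.json and confirm the ids remapped by
# this commit (<s> -> 70, </s> -> 71).
with open("added_tokens.json", encoding="utf-8") as f:
    added_tokens = json.load(f)

assert added_tokens["<s>"] == 70
assert added_tokens["</s>"] == 71
```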
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "added_tokens_decoder": {
-    "80": {
+    "68": {
       "content": "[UNK]",
       "lstrip": true,
       "normalized": false,
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": false
     },
-    "81": {
+    "69": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,18 +16,18 @@
       "single_word": false,
       "special": false
     },
-    "82": {
+    "70": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "83": {
+    "71": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -39,7 +39,6 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
vocab.json CHANGED
@@ -1,84 +1,72 @@
 {
-  "[PAD]": 81,
-  "[UNK]": 80,
-  "`": 1,
+  "[PAD]": 69,
+  "[UNK]": 68,
   "|": 0,
-  "،": 2,
-  "ؐ": 3,
-  "ؑ": 4,
-  "ؓ": 5,
-  "ؔ": 6,
-  "؛": 7,
-  "؟": 8,
-  "ء": 9,
-  "آ": 10,
-  "أ": 11,
-  "ؤ": 12,
-  "ئ": 13,
-  "ا": 14,
-  "ب": 15,
-  "ت": 16,
-  "ث": 17,
-  "ج": 18,
-  "ح": 19,
-  "خ": 20,
-  "د": 21,
-  "ذ": 22,
-  "ر": 23,
-  "ز": 24,
-  "س": 25,
-  "ش": 26,
-  "ص": 27,
-  "ض": 28,
-  "ط": 29,
-  "ظ": 30,
-  "ع": 31,
-  "غ": 32,
-  "ف": 33,
-  "ق": 34,
-  "ل": 35,
-  "م": 36,
-  "ن": 37,
-  "ه": 38,
-  "و": 39,
-  "ي": 40,
-  "ً": 41,
-  "َ": 42,
-  "ُ": 43,
-  "ِ": 44,
-  "ّ": 45,
-  "ٓ": 46,
-  "ٔ": 47,
-  "ٰ": 48,
-  "ٴ": 49,
-  "ٹ": 50,
-  "پ": 51,
-  "چ": 52,
-  "ڈ": 53,
-  "ڑ": 54,
-  "ژ": 55,
-  "ک": 56,
-  "گ": 57,
-  "ں": 58,
-  "ھ": 59,
-  "ہ": 60,
-  "ۂ": 61,
-  "ۃ": 62,
-  "ی": 63,
-  "ے": 64,
-  "ۓ": 65,
-  "۔": 66,
-  "": 67,
-  "": 68,
-  "ﮭ": 69,
-  "ﮯ": 70,
-  "ﯾ": 71,
-  "ﷲ": 72,
-  "ﷺ": 73,
-  "ﺗ": 74,
-  "ﺘ": 75,
-  "ﺩ": 76,
-  "ﺲ": 77,
-  "ﻧ": 78,
-  "ﻮ": 79
+  "،": 1,
+  "ؐ": 2,
+  "ؑ": 3,
+  "ؓ": 4,
+  "ؔ": 5,
+  "؛": 6,
+  "؟": 7,
+  "ء": 8,
+  "آ": 9,
+  "أ": 10,
+  "ؤ": 11,
+  "ئ": 12,
+  "ا": 13,
+  "ب": 14,
+  "ت": 15,
+  "ث": 16,
+  "ج": 17,
+  "ح": 18,
+  "خ": 19,
+  "د": 20,
+  "ذ": 21,
+  "ر": 22,
+  "ز": 23,
+  "س": 24,
+  "ش": 25,
+  "ص": 26,
+  "ض": 27,
+  "ط": 28,
+  "ظ": 29,
+  "ع": 30,
+  "غ": 31,
+  "ف": 32,
+  "ق": 33,
+  "ل": 34,
+  "م": 35,
+  "ن": 36,
+  "ه": 37,
+  "و": 38,
+  "ي": 39,
+  "ً": 40,
+  "َ": 41,
+  "ُ": 42,
+  "ِ": 43,
+  "ّ": 44,
+  "ٓ": 45,
+  "ٔ": 46,
+  "ٰ": 47,
+  "ٴ": 48,
+  "ٹ": 49,
+  "پ": 50,
+  "چ": 51,
+  "ڈ": 52,
+  "ڑ": 53,
+  "ژ": 54,
+  "ک": 55,
+  "گ": 56,
+  "ں": 57,
+  "ھ": 58,
+  "ہ": 59,
+  "ۂ": 60,
+  "ۃ": 61,
+  "ی": 62,
+  "ے": 63,
+  "۔": 64,
+  "": 65,
+  "": 66,
+  "": 67
 }
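To confirm the vocabulary remap is internally consistent (a sketch under the same local-files assumption), the ids in the new vocab.json should be contiguous, with the CTC word delimiter "|" still at index 0 and the sentence tokens living in added_tokens.json:

```python
import json

# Check the remapped vocabulary: contiguous ids starting at 0, with the
# word-delimiter token "|" kept at index 0.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

assert vocab["|"] == 0
assert sorted(vocab.values()) == list(range(len(vocab)))
print(len(vocab))  # expected 70; <s>/</s> (70, 71) come from added_tokens.json
```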