smutuvi committed
Commit b20a10e
1 Parent(s): ff5848f

Upload tokenizer

Files changed (4)
  1. README.md +4 -4
  2. added_tokens.json +1 -1
  3. tokenizer_config.json +1 -18
  4. vocab.json +2 -2
README.md CHANGED
@@ -1,18 +1,18 @@
 ---
 license: mit
-base_model: facebook/w2v-bert-2.0
 tags:
 - generated_from_trainer
 datasets:
 - common_voice_16_0
 metrics:
 - wer
+base_model: facebook/w2v-bert-2.0
 model-index:
 - name: w2v-bert-2.0-swahili-colab-CV16.0_5epochs
   results:
   - task:
-      name: Automatic Speech Recognition
       type: automatic-speech-recognition
+      name: Automatic Speech Recognition
     dataset:
       name: common_voice_16_0
       type: common_voice_16_0
@@ -20,9 +20,9 @@ model-index:
       split: test
       args: sw
     metrics:
-    - name: Wer
-      type: wer
+    - type: wer
       value: 0.8218669188312941
+      name: Wer
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
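The only metric recorded in the model-index is wer, with value 0.8218669188312941 on the Common Voice 16.0 Swahili test split. For reference, a minimal sketch of how such a score is typically computed with the evaluate library (the transcripts below are hypothetical placeholders, not model output):

import evaluate

# Requires: pip install evaluate jiwer
wer_metric = evaluate.load("wer")

# Hypothetical transcripts; in practice predictions come from running the
# model on the common_voice_16_0 "sw" test split.
predictions = ["habari ya asubuhi", "karibu tena"]
references = ["habari za asubuhi", "karibu tena"]

# 1 substituted word out of 5 reference words -> WER 0.2 here;
# the card reports ~0.8219 on the real test split.
wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")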
added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
   "</s>": 70,
-  "<s>": 69
+  "[PAD]": 69
 }
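This swaps the added token at id 69 from <s> to [PAD], so added_tokens.json now agrees with the pad_token declared in tokenizer_config.json. A quick sanity check after loading the tokenizer (a sketch; the repo id is inferred from the committer and model name in the card, so adjust as needed):

from transformers import Wav2Vec2CTCTokenizer

# Repo id assumed from the committer and model name above.
tok = Wav2Vec2CTCTokenizer.from_pretrained(
    "smutuvi/w2v-bert-2.0-swahili-colab-CV16.0_5epochs"
)
print(tok.pad_token, tok.pad_token_id)  # expected: [PAD] 69
print(tok.eos_token, tok.eos_token_id)  # expected: </s> 70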
tokenizer_config.json CHANGED
@@ -1,14 +1,6 @@
 {
   "added_tokens_decoder": {
-    "67": {
-      "content": "[UNK]",
-      "lstrip": true,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "68": {
+    "69": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,14 +8,6 @@
       "single_word": false,
       "special": false
     },
-    "69": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
     "70": {
       "content": "</s>",
       "lstrip": false,
@@ -39,7 +23,6 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
vocab.json CHANGED
@@ -4,8 +4,8 @@
   "*": 3,
   "/": 4,
   "=": 5,
-  "[PAD]": 68,
-  "[UNK]": 67,
+  "[PAD]": 69,
+  "[UNK]": 69,
   "\\": 6,
   "_": 7,
   "`": 8,