ydshieh (HF staff) committed
Commit ba5fa2f (1 parent: d327f9b)

Update README.md

Files changed (1)
README.md +3 -3
README.md CHANGED
@@ -23,7 +23,7 @@ model-index:
     metrics:
     - name: Test CER
       type: cer
-      value: 41.99
+      value: 30.50
 ---
 
 # Wav2Vec2-Large-XLSR-53-Chinese-zh-cn-gpt
@@ -117,7 +117,7 @@ processor = Wav2Vec2Processor.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-ch
 model = Wav2Vec2ForCTC.from_pretrained("ydshieh/wav2vec2-large-xlsr-53-chinese-zh-cn-gpt")
 model.to("cuda")
 
-chars_to_ignore_regex = '[\,\?\.\!\-\;\:"\“\%\‘\”\�\.\⋯\!\-\:\–\。\》\,\)\,\?\;\~\~\…\︰\,\(\」\‧\《\﹔\、\—\/\,\「\﹖\·\×\̃\̌\ε\λ\μ\и\т\─\□\〈\〉\『\』\ア\オ\カ\チ\ド\ベ\ャ\ヤ\ン\・\丶\a\b\f\g\i\n\p\t' + "\']"
+chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:"\\“\\%\\‘\\”\\�\\.\\⋯\\!\\-\\:\\–\\。\\》\\,\\)\\,\\?\\;\\~\\~\\…\\︰\\,\\(\\」\\‧\\《\\﹔\\、\\—\\/\\,\\「\\﹖\\·\\×\\̃\\̌\\ε\\λ\\μ\\и\\т\\─\\□\\〈\\〉\\『\\』\\ア\\オ\\カ\\チ\\ド\\ベ\\ャ\\ヤ\\ン\\・\\丶\\a\\b\\f\\g\\i\\n\\p\\t' + "\\']"
 
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
@@ -148,7 +148,7 @@ result = test_dataset.map(evaluate, batched=True, batch_size=8)
 print("CER: {:2f}".format(100 * chunked_cer(predictions=result["pred_strings"], targets=result["sentence"], chunk_size=1000)))
 ```
 
-**Test Result**: 41.987498 %
+**Test Result**: 30.497816 %
 
 
 ## Training
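
Both functional changes in this commit sit in the model card's evaluation snippet: the punctuation class stored in `chars_to_ignore_regex` and the CER figure produced by `chunked_cer`. The visible hunks call `chunked_cer` without defining it, so the sketch below is only an illustration of how such a helper could work (plain Levenshtein distance accumulated chunk by chunk); `clean_sentence`, `edit_distance`, and the abbreviated regex are hypothetical stand-ins, not the card's actual code.

```python
import re

# Illustrative subset of the character class changed in the diff above;
# the model card's full regex covers many more punctuation marks.
chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:"]'


def clean_sentence(sentence: str) -> str:
    """Typical use of such a regex: strip ignored punctuation from a
    transcript before training or scoring."""
    return re.sub(chars_to_ignore_regex, "", sentence)


def edit_distance(ref: str, hyp: str) -> int:
    """Levenshtein distance via the classic two-row dynamic program."""
    prev = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        curr = [i]
        for j, h in enumerate(hyp, start=1):
            curr.append(min(prev[j] + 1,              # deletion
                            curr[j - 1] + 1,          # insertion
                            prev[j - 1] + (r != h)))  # substitution
        prev = curr
    return prev[-1]


def chunked_cer(predictions, targets, chunk_size=1000):
    """Hypothetical chunked CER: total edit distance over total reference
    characters, accumulated chunk by chunk to bound memory use."""
    errors, total_chars = 0, 0
    for start in range(0, len(targets), chunk_size):
        for pred, target in zip(predictions[start:start + chunk_size],
                                targets[start:start + chunk_size]):
            errors += edit_distance(target, pred)
            total_chars += len(target)
    return errors / max(total_chars, 1)
```

With definitions along these lines, the print statement in the diff would report the corpus-level CER as total character edits divided by total reference characters, scaled to a percentage.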