juulaii committed
Commit 241ba79
1 Parent(s): 8ce4e5c

Update app.py

Files changed (1)
  1. app.py +9 -10
app.py CHANGED
@@ -26,26 +26,25 @@ ui.theme = "peach"
  ui.article = """<h2>Pre-trained model Information</h2>
  <h3>Automatic Speech Recognition</h3>
  <p style='text-align: justify'>The model used for the ASR part of this space is from
- [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h) which is pretrained and fine-tuned on
+ [https://huggingface.co/facebook/wav2vec2-base-960h] which is pretrained and fine-tuned on
  <b>960 hours of
  Librispeech</b> on 16kHz sampled speech audio. This model has a <b>word error rate (WER)</b> of <b>8.6 percent on
  noisy speech</b> and <b>5.2 percent on clean speech</b> on the standard LibriSpeech benchmark. More information can be
- found on its website at [wav2vec](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio)
+ found on its website at [https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio]
  and
- original model is under [pytorch/fairseq](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec).</p>
+ original model is under [https://github.com/pytorch/fairseq/tree/main/examples/wav2vec].</p>
  <h3>Text Translator</h3>
- <p style='text-align: justify'>The English to Spanish text translator pre-trained model is from [Helsinki-NLP/opus-
- mt-en-es](https://huggingface.co/Helsinki-NLP/opus-mt-en-es) which is part of the <b>The Tatoeba Translation Challenge
+ <p style='text-align: justify'>The English to Spanish text translator pre-trained model is from
+ [https://huggingface.co/Helsinki-NLP/opus-mt-en-es] which is part of the <b>The Tatoeba Translation Challenge
  (v2021-08-07)</b> as seen from its github repo at
- [Helsinki-NLP/Tatoeba-Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge). This project aims to develop machine
+ [https://github.com/Helsinki-NLP/Tatoeba-Challenge]. This project aims to develop machine
  translation in real-world
  cases for many languages. </p>
  <h3>Text to Speech</h3>
- <p style='text-align: justify'> The TTS model used is from [facebook/tts_transformer-es-css10]
- (https://huggingface.co/facebook/tts_transformer-es-css10).
+ <p style='text-align: justify'> The TTS model used is from [https://huggingface.co/facebook/tts_transformer-es-css10].
  This model uses the <b>Fairseq(-py)</b> sequence modeling toolkit for speech synthesis, in this case, specifically TTS
- for Spanish. More information can be seen on their git at [speech_synthesis]
- (https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis). </p>
+ for Spanish. More information can be seen on their git at
+ [https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis]. </p>
  """
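For context on how the three checkpoints named in this article fit together, here is a minimal sketch of the English-speech-to-Spanish-speech chain they describe. It is an illustrative assumption about the wiring, not the actual code in app.py: the ASR and translation steps use transformers pipelines, the TTS step follows the fairseq TTSHubInterface usage published with facebook/tts_transformer-es-css10 (exact calls may vary across fairseq versions), and the helper name `speech_to_spanish_speech` is hypothetical.

```python
# Illustrative sketch only -- not the code in app.py. It chains the three
# pre-trained models named in the article: wav2vec2 (English ASR),
# opus-mt-en-es (text translation), and tts_transformer-es-css10 (Spanish TTS).
from transformers import pipeline
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface

# English speech -> English text (expects 16 kHz audio, per the model card).
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

# English text -> Spanish text.
translate = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

# Spanish text -> Spanish speech; follows the fairseq hub-interface pattern
# shown for tts_transformer-es-css10 (assumed here, version-dependent).
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/tts_transformer-es-css10",
    arg_overrides={"vocoder": "hifigan", "fp16": False},
)
tts_model = models[0]
TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
generator = task.build_generator([tts_model], cfg)


def speech_to_spanish_speech(audio_path: str):
    """Hypothetical end-to-end helper: English audio file in, Spanish waveform out."""
    english_text = asr(audio_path)["text"]
    spanish_text = translate(english_text)[0]["translation_text"]
    sample = TTSHubInterface.get_model_input(task, spanish_text)
    wav, sample_rate = TTSHubInterface.get_prediction(task, tts_model, generator, sample)
    return wav, sample_rate
```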