Automatic Speech Recognition · NeMo · PyTorch · 4 languages · automatic-speech-translation · speech · audio · Transformer · FastConformer · Conformer · hf-asr-leaderboard · Eval Results
nithinraok committed
Commit 1dec236 · 1 parent: 551b53f

Update README.md

Files changed (1): README.md (+3 -3)
README.md CHANGED
@@ -331,7 +331,7 @@ Another recommended option is to use a json manifest as input, where each line i
 # Example of a line in input_manifest.json
 {
 "audio_filepath": "/path/to/audio.wav", # path to the audio file
-"duration": 10000.0, # duration of the audio
+"duration": None, # duration of the audio
 "taskname": "asr", # use "ast" for speech-to-text translation
 "source_lang": "en", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr']
 "target_lang": "en", # language of the text output, choices=['en','de','es','fr']
@@ -363,7 +363,7 @@ An example manifest for transcribing English audios can be:
 # Example of a line in input_manifest.json
 {
 "audio_filepath": "/path/to/audio.wav", # path to the audio file
-"duration": 10000.0, # duration of the audio
+"duration": None, # duration of the audio
 "taskname": "asr",
 "source_lang": "en", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr']
 "target_lang": "en", # language of the text output, choices=['en','de','es','fr']
@@ -380,7 +380,7 @@ An example manifest for transcribing English audios into German text can be:
 # Example of a line in input_manifest.json
 {
 "audio_filepath": "/path/to/audio.wav", # path to the audio file
-"duration": 10000.0, # duration of the audio
+"duration": None, # duration of the audio
 "taskname": "ast",
 "source_lang": "en", # language of the audio input, choices=['en','de','es','fr']
 "target_lang": "de", # language of the text output, choices=['en','de','es','fr']