RASMUS committed on
Commit c41b9dc
1 Parent(s): 17009c0

Update app.py

Files changed (1)
  1. app.py +12 -11
app.py CHANGED
@@ -474,17 +474,18 @@ with demo:
  with gr.Column():
  gr.Markdown('''
  ### This space allows you to:
- #### 1. Download youtube video with a given url
- #### 2. Watch it in the first video component
- #### 3. Run automatic speech recognition on the video using fast Whisper models
- #### 4. Translate the recognized transcriptions to 26 languages supported by deepL
- #### 5. Download generated subtitles in .vtt and .srt formats
- #### 6. Watch the the original video with generated subtitles
+ 1. Download a YouTube video from a given URL
+ 2. Watch it in the first video component
+ 3. Run automatic speech recognition on the video using fast Whisper models
+ 4. Translate the recognized transcriptions to 26 languages supported by DeepL
+ 5. Download generated subtitles in .vtt and .srt formats
+ 6. Watch the original video with generated subtitles
  ''')
 
  with gr.Column():
  gr.Markdown('''
- ### 1. Copy any Youtube video URL to box below (But please consider using short videos so others won't get queued) or click one of the examples and then press button "1. Download Youtube video"-button:
+ ### 1. Copy any YouTube video URL to the box below
+ (But please **consider using short videos** so others won't get queued), or click one of the examples, and then press the "1. Download Youtube video" button:
  ''')
  examples = gr.Examples(examples=
  [ "https://www.youtube.com/watch?v=nlMuHtV82q8&ab_channel=NothingforSale24",
@@ -509,7 +510,7 @@ with demo:
  gr.Markdown('''
  ##### Here you can start the transcription and translation process.
  ##### Be aware that processing will last some time. With base model it is around 3x speed
- ##### Please select source language for better transcriptions. Using 'Let the model analyze' makes mistakes sometimes and may lead to bad transcriptions
+ ##### **Please select the source language** for better transcriptions. 'Let the model analyze' sometimes makes mistakes and may lead to bad transcriptions
  ''')
  selected_source_lang.render()
  selected_whisper_model.render()
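The hunk above only changes the UI text for Step 2; the transcription call itself lives elsewhere in app.py and is not shown here. A hedged sketch of what selecting a source language means for Whisper, using the openai-whisper package as a stand-in for the Space's "fast Whisper models":

```python
# Illustrative sketch only; the Space's actual inference code is not in this diff.
import whisper

def transcribe_video(video_path: str, source_lang: str | None = "en",
                     model_name: str = "base") -> list[dict]:
    """Run Whisper on the video's audio track and return timed segments."""
    model = whisper.load_model(model_name)
    # language=None corresponds to the "Let the model analyze" option: Whisper
    # auto-detects the language, which can be unreliable for short or noisy clips.
    result = model.transcribe(video_path, language=source_lang)
    # Each segment carries "start", "end" and "text"; this is the material that
    # feeds the transcription dataframe and the subtitle files.
    return result["segments"]
```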
@@ -530,9 +531,9 @@ with demo:
  with gr.Column():
  gr.Markdown('''
  ##### PLEASE READ BELOW
- ##### Here you will can translate transcriptions to 26 languages.
- ##### If spoken language is not in the list, translation might not work. In this case original transcriptions are used
- ##### ''')
+ Here you can translate transcriptions to 26 languages.
+ If the spoken language is not in the list, translation might not work. In that case the original transcriptions are used
+ ''')
  selected_translation_lang_2.render()
  translate_transcriptions_button = gr.Button("Step 3. Translate transcription")
  translate_transcriptions_button.click(translate_transcriptions, [transcription_df, selected_translation_lang_2], [transcription_and_translation_df, subtitle_files])
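The translate_transcriptions() function wired to the button above is defined elsewhere in app.py. A sketch of how the segment texts could be sent to DeepL using the official deepl client; the function and field names here are illustrative, not the Space's own:

```python
# Hypothetical sketch of Step 3; the Space's translate_transcriptions() is not shown here.
import deepl

def translate_segments(segments: list[dict], target_lang: str, auth_key: str) -> list[dict]:
    """Translate each segment's text with DeepL while keeping the timestamps."""
    translator = deepl.Translator(auth_key)
    translated = []
    for seg in segments:
        result = translator.translate_text(seg["text"], target_lang=target_lang)
        translated.append({**seg, "text": result.text})
    return translated

# target_lang uses DeepL codes such as "DE", "FI" or "EN-GB"; if the spoken
# language is unsupported, the app falls back to the original transcriptions.
```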
 
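The same click handler also returns subtitle_files for download. A hedged sketch of how timed segments could be written out as .srt (the Space's own subtitle writer is not part of this diff):

```python
# Illustrative .srt writer; not the Space's actual implementation.
def to_srt_timestamp(seconds: float) -> str:
    """Format seconds as an SRT timestamp, e.g. 00:01:02,345."""
    ms = int(round(seconds * 1000))
    h, rem = divmod(ms, 3_600_000)
    m, rem = divmod(rem, 60_000)
    s, ms = divmod(rem, 1000)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"

def write_srt(segments: list[dict], path: str = "subtitles.srt") -> str:
    """Write numbered, timestamped cues for each segment and return the file path."""
    with open(path, "w", encoding="utf-8") as f:
        for i, seg in enumerate(segments, start=1):
            f.write(f"{i}\n")
            f.write(f"{to_srt_timestamp(seg['start'])} --> {to_srt_timestamp(seg['end'])}\n")
            f.write(f"{seg['text'].strip()}\n\n")
    return path

# A .vtt file is nearly identical: it starts with a "WEBVTT" header and uses "."
# instead of "," in the timestamps.
```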