oyemade committed on
Commit
90a4c59
β€’
1 Parent(s): 4f938ef

feat: gradio fixes

Browse files
Files changed (1) hide show
  1. app.py +10 -18
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import pipeline
3
  from transformers.pipelines.audio_utils import ffmpeg_read
4
  import gradio as gr
5
 
6
- MODEL_NAME = "oyemade/whisper-medium-yo"
7
  BATCH_SIZE = 8
8
 
9
  device = 0 if torch.cuda.is_available() else "cpu"
@@ -55,16 +55,14 @@ demo = gr.Blocks()
55
  mic_transcribe = gr.Interface(
56
  fn=transcribe,
57
  inputs=[
58
- gr.inputs.Audio(source="microphone", type="filepath", optional=True),
59
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
60
- gr.inputs.Checkbox(default=False, label="Return timestamps"),
61
  ],
62
  outputs="text",
63
- layout="horizontal",
64
- theme="huggingface",
65
  title="NeoForm AI Demo: Transcribe Yoruba Audio",
66
  description=(
67
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
68
  f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and πŸ€— Transformers to transcribe audio files"
69
  " of arbitrary length."
70
  ),
@@ -74,23 +72,17 @@ mic_transcribe = gr.Interface(
74
  file_transcribe = gr.Interface(
75
  fn=transcribe,
76
  inputs=[
77
- gr.inputs.Audio(source="upload", optional=True, label="Audio file", type="filepath"),
78
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
79
- gr.inputs.Checkbox(default=False, label="Return timestamps"),
80
  ],
81
  outputs="text",
82
- layout="horizontal",
83
- theme="huggingface",
84
  title="NeoForm AI Demo: Transcribe Yoruba Audio",
85
  description=(
86
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
87
  f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and πŸ€— Transformers to transcribe audio files"
88
  " of arbitrary length."
89
  ),
90
- examples=[
91
- ["./example.flac", "transcribe", False],
92
- ["./example.flac", "transcribe", True],
93
- ],
94
  cache_examples=True,
95
  allow_flagging="never",
96
  )
@@ -98,4 +90,4 @@ file_transcribe = gr.Interface(
98
  with demo:
99
  gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])
100
 
101
- demo.launch(enable_queue=True)
 
3
  from transformers.pipelines.audio_utils import ffmpeg_read
4
  import gradio as gr
5
 
6
+ MODEL_NAME = "neoform-ai/whisper-medium-yoruba"
7
  BATCH_SIZE = 8
8
 
9
  device = 0 if torch.cuda.is_available() else "cpu"
 
55
  mic_transcribe = gr.Interface(
56
  fn=transcribe,
57
  inputs=[
58
+ gr.Audio(sources="microphone", type="filepath"),
59
+ gr.Radio(["transcribe", "translate"], label="Task"),
60
+ gr.Checkbox(label="Return timestamps"),
61
  ],
62
  outputs="text",
 
 
63
  title="NeoForm AI Demo: Transcribe Yoruba Audio",
64
  description=(
65
+ "Transcribe long-form microphone or audio inputs in Yoruba language with the click of a button! Demo uses the"
66
  f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and πŸ€— Transformers to transcribe audio files"
67
  " of arbitrary length."
68
  ),
 
72
  file_transcribe = gr.Interface(
73
  fn=transcribe,
74
  inputs=[
75
+ gr.Audio(sources="upload", label="Audio file", type="filepath"),
76
+ gr.Radio(["transcribe", "translate"], label="Task"),
77
+ gr.Checkbox(label="Return timestamps"),
78
  ],
79
  outputs="text",
 
 
80
  title="NeoForm AI Demo: Transcribe Yoruba Audio",
81
  description=(
82
+ "Transcribe long-form microphone or audio inputs in Yoruba language with the click of a button! Demo uses the"
83
  f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and πŸ€— Transformers to transcribe audio files"
84
  " of arbitrary length."
85
  ),
 
 
 
 
86
  cache_examples=True,
87
  allow_flagging="never",
88
  )
 
90
  with demo:
91
  gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])
92
 
93
+ demo.launch()