wjbmattingly committed on
Commit
70091ec
·
1 Parent(s): ab3a2d3

simplified app

Browse files
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. app.py +33 -133
  3. requirements.txt +2 -1
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -1,150 +1,50 @@
1
- # original app: https://huggingface.co/spaces/xianbao/whisper-v3-zero
2
-
3
- import torch
4
-
5
  import gradio as gr
 
 
 
6
  import spaces
7
- import yt_dlp as youtube_dl
8
- from transformers import pipeline
9
- from transformers.pipelines.audio_utils import ffmpeg_read
10
-
11
- import tempfile
12
- import os
13
-
14
- MODEL_NAME = "TheirStory/whisper-small-xhosa"
15
- BATCH_SIZE = 8
16
- FILE_LIMIT_MB = 1000
17
- YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
18
 
19
- device = 0 if torch.cuda.is_available() else "cpu"
20
-
21
- pipe = pipeline(
22
- task="automatic-speech-recognition",
23
- model=MODEL_NAME,
24
- chunk_length_s=30,
25
- device=device,
26
- )
27
 
28
  @spaces.GPU
29
- def transcribe(inputs, task):
30
- if inputs is None:
31
- raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
32
-
33
- text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
34
- return text
35
-
36
-
37
- def _return_yt_html_embed(yt_url):
38
- video_id = yt_url.split("?v=")[-1]
39
- HTML_str = (
40
- f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
41
- " </center>"
42
- )
43
- return HTML_str
44
-
45
- def download_yt_audio(yt_url, filename):
46
- info_loader = youtube_dl.YoutubeDL()
47
-
48
- try:
49
- info = info_loader.extract_info(yt_url, download=False)
50
- except youtube_dl.utils.DownloadError as err:
51
- raise gr.Error(str(err))
52
-
53
- file_length = info["duration_string"]
54
- file_h_m_s = file_length.split(":")
55
- file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
56
-
57
- if len(file_h_m_s) == 1:
58
- file_h_m_s.insert(0, 0)
59
- if len(file_h_m_s) == 2:
60
- file_h_m_s.insert(0, 0)
61
- file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
62
-
63
- if file_length_s > YT_LENGTH_LIMIT_S:
64
- yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
65
- file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
66
- raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
67
-
68
- ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
69
-
70
- with youtube_dl.YoutubeDL(ydl_opts) as ydl:
71
- try:
72
- ydl.download([yt_url])
73
- except youtube_dl.utils.ExtractorError as err:
74
- raise gr.Error(str(err))
75
 
 
 
 
 
 
76
 
77
- def yt_transcribe(yt_url, task, max_filesize=75.0):
78
- html_embed_str = _return_yt_html_embed(yt_url)
79
 
80
- with tempfile.TemporaryDirectory() as tmpdirname:
81
- filepath = os.path.join(tmpdirname, "video.mp4")
82
- download_yt_audio(yt_url, filepath)
83
- with open(filepath, "rb") as f:
84
- inputs = f.read()
85
 
86
- inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
87
- inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
88
 
89
- text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
 
90
 
91
- return html_embed_str, text
92
 
93
-
94
- demo = gr.Blocks()
95
-
96
- mf_transcribe = gr.Interface(
97
- fn=transcribe,
98
- inputs=[
99
- gr.Audio(type="filepath"),
100
- gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
101
- ],
102
- outputs="text",
103
- theme="huggingface",
104
- title="Whisper Large V3: Transcribe Audio",
105
- description=(
106
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
107
- f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
108
- " of arbitrary length."
109
- ),
110
- allow_flagging="never",
111
- )
112
-
113
- file_transcribe = gr.Interface(
114
- fn=transcribe,
115
  inputs=[
116
- gr.Audio(type="filepath", label="Audio file"),
117
- gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
118
  ],
119
  outputs="text",
120
- theme="huggingface",
121
- title="Whisper Large V3: Transcribe Audio",
122
- description=(
123
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
124
- f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
125
- " of arbitrary length."
126
- ),
127
- allow_flagging="never",
128
- )
129
-
130
- yt_transcribe = gr.Interface(
131
- fn=yt_transcribe,
132
- inputs=[
133
- gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
134
- gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
135
- ],
136
- outputs=["html", "text"],
137
- theme="huggingface",
138
- title="Whisper Large V3: Transcribe YouTube",
139
- description=(
140
- "Transcribe long-form YouTube videos with the click of a button! Demo uses the OpenAI Whisper checkpoint"
141
- f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
142
- " arbitrary length."
143
- ),
144
- allow_flagging="never",
145
  )
146
 
147
- with demo:
148
- gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
149
-
150
- demo.launch()
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import WhisperProcessor, WhisperForConditionalGeneration
3
+ import torch
4
+ import librosa
5
  import spaces
 
 
 
 
 
 
 
 
 
 
 
6
 
7
# Load the Whisper processor and model once at import time so every
# request served by this process reuses the same weights.
model_name = "TheirStory/whisper-small-xhosa"
processor = WhisperProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name)
 
 
 
 
11
 
12
@spaces.GPU
def transcribe_audio(audio):
    """Transcribe a Xhosa audio clip to text with the module-level Whisper model.

    Parameters
    ----------
    audio : str | tuple | None
        Either a filesystem path to an audio file, or a Gradio
        ``type="numpy"`` value, i.e. a ``(sample_rate, data)`` tuple.
        ``None`` when the user submitted without providing audio.

    Returns
    -------
    str
        The decoded transcription.

    Raises
    ------
    gr.Error
        If no audio was provided.
    """
    if audio is None:
        # Gradio passes None when nothing was recorded/uploaded.
        raise gr.Error("No audio provided. Please record or upload an audio file.")

    # BUG FIX: the original did `model = model.to("cuda")`, which makes
    # `model` a *local* name and raises UnboundLocalError on the read.
    # Bind the module-level model to a local instead of rebinding the global.
    mdl = model
    if torch.cuda.is_available():
        mdl = mdl.to("cuda")

    # Load the audio.
    if isinstance(audio, str):  # a file path
        # Resample to 16 kHz, the rate Whisper's feature extractor expects.
        audio_array, sampling_rate = librosa.load(audio, sr=16000)
    else:
        # BUG FIX: Gradio `type="numpy"` audio is (sample_rate, data); the
        # original unpacked it in the reverse order.
        sampling_rate, audio_array = audio

    # Convert the waveform to Whisper log-mel input features.
    input_features = processor(
        audio_array, sampling_rate=sampling_rate, return_tensors="pt"
    ).input_features

    if torch.cuda.is_available():
        input_features = input_features.to("cuda")

    # Generate token ids, then decode them to text.
    generated_ids = mdl.generate(input_features)
    transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    return transcription
36
 
37
# Create the Gradio interface.
# BUG FIX: the original passed TWO Audio components to a one-parameter
# function, so Gradio would invoke transcribe_audio with two arguments and
# fail on every submit. A single Audio component matches the function's
# signature; type="filepath" is what the function's str branch expects.
# NOTE(review): the original also used `source=`, which Gradio 4.x renamed
# to `sources` — omitting it uses the component default (mic + upload on
# 4.x); confirm against the pinned Gradio version.
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath", label="Record or Upload Audio"),
    outputs="text",
    title="Xhosa Audio Transcription",
    description="Record or upload Xhosa audio to get its transcription using the TheirStory/whisper-small-xhosa model."
)

# Launch the app
iface.launch()
 
 
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  git+https://github.com/huggingface/transformers
2
  torch
3
- yt-dlp
 
 
1
  git+https://github.com/huggingface/transformers
2
  torch
3
+ yt-dlp
4
+ librosa