StevenChen16 committed
Commit b86a6f7 · verified · 1 Parent(s): b8763a5

Update app.py

Files changed (1)
  1. app.py +134 -22
app.py CHANGED
@@ -1,34 +1,146 @@
  import spaces
  import torch
- import whisperx
+
  import gradio as gr
- import torch
+ import yt_dlp as youtube_dl
+ from transformers import pipeline
+ from transformers.pipelines.audio_utils import ffmpeg_read
+
+ import tempfile
+ import os
+
+ MODEL_NAME = "openai/whisper-large-v3-turbo"
+ BATCH_SIZE = 8
+ FILE_LIMIT_MB = 1000
+ YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files
+
+ device = 0 if torch.cuda.is_available() else "cpu"
+
+ pipe = pipeline(
+     task="automatic-speech-recognition",
+     model=MODEL_NAME,
+     chunk_length_s=30,
+     device=device,
+ )
+
+
+ @spaces.GPU
+ def transcribe(inputs, task):
+     if inputs is None:
+         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
+
+     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+     return text
+
+
+ def _return_yt_html_embed(yt_url):
+     video_id = yt_url.split("?v=")[-1]
+     HTML_str = (
+         f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
+         " </center>"
+     )
+     return HTML_str
 
- # Detect the device type
- device = "cuda" if torch.cuda.is_available() else "cpu"
- compute_type = "float16" if device == "cuda" else "int8"
+ def download_yt_audio(yt_url, filename):
+     info_loader = youtube_dl.YoutubeDL()
+
+     try:
+         info = info_loader.extract_info(yt_url, download=False)
+     except youtube_dl.utils.DownloadError as err:
+         raise gr.Error(str(err))
+
+     file_length = info["duration_string"]
+     file_h_m_s = file_length.split(":")
+     file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
+
+     if len(file_h_m_s) == 1:
+         file_h_m_s.insert(0, 0)
+     if len(file_h_m_s) == 2:
+         file_h_m_s.insert(0, 0)
+     file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
+
+     if file_length_s > YT_LENGTH_LIMIT_S:
+         yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
+         file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
+         raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
+
+     ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
+
+     with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+         try:
+             ydl.download([yt_url])
+         except youtube_dl.utils.ExtractorError as err:
+             raise gr.Error(str(err))
 
- @spaces.GPU  # ensure this runs in a GPU environment
- def load_model():
-     # Load the model when a GPU is available
-     return whisperx.load_model("large-v3", device=device, compute_type=compute_type)
+ @spaces.GPU
+ def yt_transcribe(yt_url, task, max_filesize=75.0):
+     html_embed_str = _return_yt_html_embed(yt_url)
 
- # Load the WhisperX model
- model = load_model()
+     with tempfile.TemporaryDirectory() as tmpdirname:
+         filepath = os.path.join(tmpdirname, "video.mp4")
+         download_yt_audio(yt_url, filepath)
+         with open(filepath, "rb") as f:
+             inputs = f.read()
 
- def transcribe(audio_path):
-     # Transcribe with WhisperX
-     result = model.transcribe(audio_path)
-     return result['text']
+     inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
+     inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
 
- # Create the Gradio interface
- iface = gr.Interface(
+     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
+
+     return html_embed_str, text
+
+
+ demo = gr.Blocks(theme=gr.themes.Ocean())
+
+ mf_transcribe = gr.Interface(
      fn=transcribe,
-     inputs=gr.Audio(source=["upload","microphone"], type="filepath"),
+     inputs=[
+         gr.Audio(sources="microphone", type="filepath"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+     ],
      outputs="text",
-     title="WhisperX Speech-to-Text",
-     description="Upload an audio file and it will be transcribed with the WhisperX model."
+     title="Whisper Large V3 Turbo: Transcribe Audio",
+     description=(
+         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+         " of arbitrary length."
+     ),
+     allow_flagging="never",
  )
 
- if __name__ == "__main__":
-     iface.launch()
+ file_transcribe = gr.Interface(
+     fn=transcribe,
+     inputs=[
+         gr.Audio(sources="upload", type="filepath", label="Audio file"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+     ],
+     outputs="text",
+     title="Whisper Large V3: Transcribe Audio",
+     description=(
+         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
+         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
+         " of arbitrary length."
+     ),
+     allow_flagging="never",
+ )
+
+ yt_transcribe = gr.Interface(
+     fn=yt_transcribe,
+     inputs=[
+         gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
+         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
+     ],
+     outputs=["html", "text"],
+     title="Whisper Large V3: Transcribe YouTube",
+     description=(
+         "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint"
+         f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
+         " arbitrary length."
+     ),
+     allow_flagging="never",
+ )
+
+ with demo:
+     gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
+
+ demo.queue().launch(ssr_mode=False)
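
The new app.py is built around the 🤗 Transformers ASR pipeline added above. The snippet below is a minimal sketch of the same transcription call without the Gradio UI; "example.wav" is a placeholder path, and everything else is taken from the diff. (One caveat in the committed file: download_yt_audio calls time.strftime, but app.py never imports time, so the over-length YouTube error path would additionally need "import time".)

# Minimal sketch of the transcription path from the new app.py, outside the Gradio UI.
# Assumes transformers, torch, and ffmpeg are available; "example.wav" is a placeholder file.
import torch
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v3-turbo"
BATCH_SIZE = 8

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,  # long-form audio is processed in 30-second chunks
    device=device,
)

# task="transcribe" keeps the source language; task="translate" outputs English.
result = pipe(
    "example.wav",
    batch_size=BATCH_SIZE,
    generate_kwargs={"task": "transcribe"},
    return_timestamps=True,
)
print(result["text"])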