suisuyy committed on
Commit
7233e3e
1 Parent(s): c8a6daa
__pycache__/app.cpython-310.pyc ADDED
Binary file (2.71 kB).
 
app.py CHANGED
@@ -1,93 +1,50 @@
 import torch
+import time
 
 import gradio as gr
 import spaces
-import yt_dlp as youtube_dl
 from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
 
-import tempfile
-import os
+DEFAULT_MODEL_NAME = "openai/whisper-tiny"
 
-MODEL_NAME = "openai/whisper-large-v3"
 BATCH_SIZE = 8
-FILE_LIMIT_MB = 1000
-YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files
 
 device = 0 if torch.cuda.is_available() else "cpu"
 
-pipe = pipeline(
-    task="automatic-speech-recognition",
-    model=MODEL_NAME,
-    chunk_length_s=30,
-    device=device,
-)
+def load_pipeline(model_name):
+    return pipeline(
+        task="automatic-speech-recognition",
+        model=model_name,
+        chunk_length_s=30,
+        device=device,
+    )
+
+pipe = load_pipeline(DEFAULT_MODEL_NAME)
 
 @spaces.GPU
-def transcribe(inputs, task):
+def transcribe(inputs, task, model_name):
     if inputs is None:
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
 
+    global pipe
+    if model_name != pipe.model.name_or_path:
+        pipe = load_pipeline(model_name)
+
+    start_time = time.time()  # Record the start time
     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    return text
+    end_time = time.time()  # Record the end time
+
+    transcription_time = end_time - start_time  # Calculate the transcription time
+
+    # Create the transcription time output with additional information
+    transcription_time_output = (
+        f"Transcription Time: {transcription_time:.2f} seconds\n"
+        f"Model Used: {model_name}\n"
+        f"Device Used: {'GPU' if torch.cuda.is_available() else 'CPU'}"
+    )
+
+    return text, transcription_time_output
 
-
-def _return_yt_html_embed(yt_url):
-    video_id = yt_url.split("?v=")[-1]
-    HTML_str = (
-        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
-        " </center>"
-    )
-    return HTML_str
-
-def download_yt_audio(yt_url, filename):
-    info_loader = youtube_dl.YoutubeDL()
-
-    try:
-        info = info_loader.extract_info(yt_url, download=False)
-    except youtube_dl.utils.DownloadError as err:
-        raise gr.Error(str(err))
-
-    file_length = info["duration_string"]
-    file_h_m_s = file_length.split(":")
-    file_h_m_s = [int(sub_length) for sub_length in file_h_m_s]
-
-    if len(file_h_m_s) == 1:
-        file_h_m_s.insert(0, 0)
-    if len(file_h_m_s) == 2:
-        file_h_m_s.insert(0, 0)
-    file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]
-
-    if file_length_s > YT_LENGTH_LIMIT_S:
-        yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
-        file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
-        raise gr.Error(f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video.")
-
-    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
-
-    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
-        try:
-            ydl.download([yt_url])
-        except youtube_dl.utils.ExtractorError as err:
-            raise gr.Error(str(err))
-
-
-def yt_transcribe(yt_url, task, max_filesize=75.0):
-    html_embed_str = _return_yt_html_embed(yt_url)
-
-    with tempfile.TemporaryDirectory() as tmpdirname:
-        filepath = os.path.join(tmpdirname, "video.mp4")
-        download_yt_audio(yt_url, filepath)
-        with open(filepath, "rb") as f:
-            inputs = f.read()
-
-    inputs = ffmpeg_read(inputs, pipe.feature_extractor.sampling_rate)
-    inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
-
-    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-
-    return html_embed_str, text
 
 demo = gr.Blocks()
 
@@ -96,14 +53,19 @@ mf_transcribe = gr.Interface(
     inputs=[
         gr.Audio(type="filepath"),
         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+        gr.Textbox(
+            label="Model Name",
+            value=DEFAULT_MODEL_NAME,
+            placeholder="Enter the model name",
+            info="Some available models: distil-whisper/distil-large-v3, distil-whisper/distil-medium.en, Systran/faster-distil-whisper-large-v3, Systran/faster-whisper-large-v3, Systran/faster-whisper-medium, openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v3"
+        ),
     ],
-    outputs="text",
+    outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
     theme="huggingface",
-    title="Whisper Large V3: Transcribe Audio",
+    title="Whisper Transcription",
     description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
+        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the specified OpenAI Whisper"
+        " checkpoint and 🤗 Transformers to transcribe audio files of arbitrary length."
     ),
     allow_flagging="never",
 )
@@ -113,37 +75,24 @@ file_transcribe = gr.Interface(
     inputs=[
         gr.Audio(type="filepath", label="Audio file"),
         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
+        gr.Textbox(
+            label="Model Name",
+            value=DEFAULT_MODEL_NAME,
+            placeholder="Enter the model name",
+            info="Some available models: openai/whisper-tiny, openai/whisper-base, openai/whisper-medium, openai/whisper-large-v2"
+        ),
     ],
-    outputs="text",
-    theme="huggingface",
-    title="Whisper Large V3: Transcribe Audio",
-    description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
-    ),
-    allow_flagging="never",
-)
-
-yt_transcribe = gr.Interface(
-    fn=yt_transcribe,
-    inputs=[
-        gr.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
-        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
-    ],
-    outputs=["html", "text"],
+    outputs=[gr.TextArea(label="Transcription"), gr.TextArea(label="Transcription Info")],
     theme="huggingface",
-    title="Whisper Large V3: Transcribe YouTube",
+    title="Whisper Transcription",
     description=(
-        "Transcribe long-form YouTube videos with the click of a button! Demo uses the OpenAI Whisper checkpoint"
-        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
-        " arbitrary length."
+        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the specified OpenAI Whisper"
+        " checkpoint and 🤗 Transformers to transcribe audio files of arbitrary length."
     ),
     allow_flagging="never",
 )
 
 with demo:
-    gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
-
-demo.launch()
+    gr.TabbedInterface([mf_transcribe, file_transcribe], ["Microphone", "Audio file"])
+
+demo.launch(share=True)
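
For reference, a minimal standalone sketch of the checkpoint-swapping pattern the new app.py uses: the ASR pipeline is rebuilt only when the requested model name differs from the one already loaded, and the transcription call is timed. The function name and the audio path in the usage comment are illustrative, not part of this commit.

import time

import torch
from transformers import pipeline

DEFAULT_MODEL_NAME = "openai/whisper-tiny"
device = 0 if torch.cuda.is_available() else "cpu"

def load_pipeline(model_name):
    # Chunked automatic-speech-recognition pipeline, as in app.py
    return pipeline(
        task="automatic-speech-recognition",
        model=model_name,
        chunk_length_s=30,
        device=device,
    )

pipe = load_pipeline(DEFAULT_MODEL_NAME)

def transcribe(audio_path, model_name=DEFAULT_MODEL_NAME, task="transcribe"):
    global pipe
    # Reload only when a different checkpoint is requested; otherwise reuse the cached pipeline
    if model_name != pipe.model.name_or_path:
        pipe = load_pipeline(model_name)
    start = time.time()
    text = pipe(audio_path, generate_kwargs={"task": task}, return_timestamps=True)["text"]
    return text, time.time() - start

# Usage (illustrative path):
# text, seconds = transcribe("audio_sample/onetoeight.mp3", model_name="openai/whisper-base")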
audio_sample/onetoeight.mp3 ADDED
Binary file (137 kB).
 
audio_sample/onetofive_enjpzh.mp3 ADDED
Binary file (224 kB).
 
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 git+https://github.com/huggingface/transformers
 torch
 yt-dlp
+gradio==4.8.0