jeffh committed
Commit 1a3ef5d
1 Parent(s): f7ede81

improvements:


- added support for v2 Whisper models
- added language selection
- added support for transcription with timestamps
- added .srt and .csv output formats (a short sketch of the underlying Whisper calls follows below)
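The sketch below is illustrative only and is not part of the commit; the "large-v2" size, "audio.mp4" filename, and "english" language are placeholder examples of the Whisper calls the new features rely on.

import whisper

model = whisper.load_model("large-v2")                      # v2 checkpoints appear as additional model sizes
result = model.transcribe("audio.mp4", language="english")  # language=None lets Whisper auto-detect

for segment in result["segments"]:                          # per-segment timestamps back the .srt/.csv output
    print(segment["start"], segment["end"], segment["text"])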

Files changed (1)
  1. app.py +64 -34
app.py CHANGED
@@ -2,29 +2,65 @@ import gradio as gr
 import whisper
 from pytube import YouTube
 
-loaded_model = whisper.load_model("base")
-current_size = 'base'
-def inference(link):
-    yt = YouTube(link)
-    path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp4")
-    options = whisper.DecodingOptions(without_timestamps=True)
-    results = loaded_model.transcribe(path)
-    return results['text']
-
-def change_model(size):
-    if size == current_size:
-        return
-    loaded_model = whisper.load_model(size)
-    current_size = size
-
-def populate_metadata(link):
-    yt = YouTube(link)
-    return yt.thumbnail_url, yt.title
+
+class GradioInference():
+    def __init__(self):
+        self.sizes = list(whisper._MODELS.keys())
+        self.langs = ["none"] + sorted(list(whisper.tokenizer.LANGUAGES.values()))
+        self.current_size = "base"
+        self.loaded_model = whisper.load_model(self.current_size)
+        self.yt = None
+
+    def __call__(self, link, lang, size, subs):
+        if self.yt is None:
+            self.yt = YouTube(link)
+        path = self.yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4")
+
+        if lang == "none":
+            lang = None
+
+        if size != self.current_size:
+            self.loaded_model = whisper.load_model(size)
+            self.current_size = size
+        results = self.loaded_model.transcribe(path, language=lang)
+
+        if subs == "None":
+            return results["text"]
+        elif subs == ".srt":
+            return self.srt(results["segments"])
+        elif subs == ".csv":
+            return self.csv(results["segments"])
+
+    def srt(self, segments):
+        output = ""
+        for i, segment in enumerate(segments):
+            output += f"{i+1}\n"
+            output += f"{self.format_time(segment['start'])} --> {self.format_time(segment['end'])}\n"
+            output += f"{segment['text']}\n\n"
+        return output
+
+    def csv(self, segments):
+        output = ""
+        for segment in segments:
+            output += f"{segment['start']},{segment['end']},{segment['text']}\n"
+        return output
+
+    def format_time(self, time):
+        hours = time//3600
+        minutes = (time - hours*3600)//60
+        seconds = time - hours*3600 - minutes*60
+        milliseconds = (time - int(time))*1000
+        return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d},{int(milliseconds):03d}"
+
+    def populate_metadata(self, link):
+        self.yt = YouTube(link)
+        return self.yt.thumbnail_url, self.yt.title
+
+gio = GradioInference()
 title="Youtube Whisperer"
 description="Speech to text transcription of Youtube videos using OpenAI's Whisper"
-block = gr.Blocks()
 
+block = gr.Blocks()
 with block:
     gr.HTML(
         """
@@ -40,23 +76,17 @@ with block:
     )
     with gr.Group():
        with gr.Box():
-            sz = gr.Dropdown(label="Model Size", choices=['base','small', 'medium', 'large'], value='base')
-
+            sz = gr.Dropdown(label="Model Size", choices=gio.sizes, value='base')
+            lang = gr.Dropdown(label="Language", choices=gio.langs, value="none")
+            with gr.Row().style(mobile_collapse=False, equal_height=True):
+                wt = gr.Radio(["None", ".srt", ".csv"], label="With Timestamps?")
             link = gr.Textbox(label="YouTube Link")
-
+            title = gr.Label(label="Video Title")
             with gr.Row().style(mobile_collapse=False, equal_height=True):
-                title = gr.Label(label="Video Title", placeholder="Title")
                 img = gr.Image(label="Thumbnail")
-                text = gr.Textbox(
-                    label="Transcription",
-                    placeholder="Transcription Output",
-                    lines=5)
+                text = gr.Textbox(label="Transcription", placeholder="Transcription Output", lines=5)
             with gr.Row().style(mobile_collapse=False, equal_height=True):
                 btn = gr.Button("Transcribe")
-
-    # Events
-    btn.click(inference, inputs=[link], outputs=[text])
-    link.change(populate_metadata, inputs=[link], outputs=[img, title])
-    sz.change(change_model, inputs=[sz], outputs=[])
-
-block.launch(debug=True)
+    btn.click(gio, inputs=[link, lang, sz, wt], outputs=[text])
+    link.change(gio.populate_metadata, inputs=[link], outputs=[img, title])
+block.launch()
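For reference, a hypothetical direct use of the new class outside the Gradio UI (placeholder URL; assumes the GradioInference definition above is available):

gio = GradioInference()

# Plain text, forced English, base model; subs="None" returns the raw transcript:
text = gio("https://www.youtube.com/watch?v=XXXXXXXXXXX", "english", "base", "None")

# subs=".srt" returns numbered SubRip cues, e.g.:
# 1
# 00:00:00,000 --> 00:00:04,200
#  Hello and welcome.

# subs=".csv" returns one "start,end,text" row per segment, e.g.:
# 0.0,4.2, Hello and welcome.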