kobakhit committed
Commit c4254a4 (0 parents: root commit)

first commit
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ test.ipynb
+ secrets.toml
+ models/*
+ models
.streamlit/config.toml ADDED
@@ -0,0 +1,6 @@
+ [theme]
+ primaryColor = "#696969"
+ backgroundColor = "#000000"
+ secondaryBackgroundColor = "#282828"
+ textColor = "#fafafa"
+ font = "sans serif"
README.md ADDED
@@ -0,0 +1,22 @@
+ ---
+ title: Speech To Chat
+ emoji: 🐨
+ colorFrom: gray
+ colorTo: gray
+ sdk: streamlit
+ sdk_version: 1.27.2
+ app_file: app.py
+ pinned: false
+ ---
+
+ A speaker diarization app that also offers transcription and AI chat features.
+
+ This application performs speech diarization (separating an audio stream into segments according to speaker identity) and transcription (converting speech into written text). It uses the PyAnnote and Whisper APIs, and can process audio either uploaded from a local file or fetched from a YouTube video URL.
+
+ ## TO DO
+ - [ ] Asynchronous Whisper requests [plan](https://stackoverflow.com/a/63179518)
+
+ ## References
+ - [pyannote.audio](https://github.com/pyannote/pyannote-audio)
+ - [HuggingFace pyannote diarization](https://huggingface.co/pyannote/speaker-diarization-3.0)
+ - [Whisper API](https://platform.openai.com/docs/guides/speech-to-text/quickstart)
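For readers who want the two-step pipeline without the Streamlit UI, here is a minimal sketch of what app.py (below) wires together. It assumes the same environment variables app.py reads (`openai` for the OpenAI key, `hf` for the Hugging Face token), a hypothetical local `audio.wav`, and the pinned `openai==0.28.1` client; treat it as an outline, not the app itself.

```python
import os
import openai
from pydub import AudioSegment
from pyannote.audio import Pipeline

openai.api_key = os.getenv('openai')  # same env var name app.py uses

# 1. Diarization: who spoke when (gated model, needs a Hugging Face token)
pipeline = Pipeline.from_pretrained(
    "pyannote/speaker-diarization-3.0", use_auth_token=os.getenv('hf'))
diarization = pipeline('audio.wav')  # hypothetical input file

# 2. Transcription: run each speaker turn through the Whisper API
audio = AudioSegment.from_file('audio.wav')
for turn, _, speaker in diarization.itertracks(yield_label=True):
    segment = audio[int(turn.start * 1000):int(turn.end * 1000)]
    segment.export('segment.wav', format='wav')
    with open('segment.wav', 'rb') as f:
        text = openai.Audio.transcribe('whisper-1', f)['text']
    print(f"{speaker} [{turn.start:.1f}s-{turn.end:.1f}s]: {text}")
```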
app.py ADDED
@@ -0,0 +1,326 @@
+ import streamlit as st
+ import streamlit_ext as ste
+ import openai
+ from pydub import AudioSegment
+ from pytube import YouTube
+ import pytube
+ import io
+ from pyannote.audio import Pipeline
+ from pyannote.audio.pipelines.utils.hook import ProgressHook
+ from pyannote.database.util import load_rttm
+ from pyannote.core import Annotation, Segment, notebook
+ import time
+ import json
+ import torch
+ import urllib.parse as urlparse
+ from urllib.parse import urlencode
+ import os
+
+ import matplotlib
+ matplotlib.use('Agg')
+ from matplotlib import pyplot as plt
+
+ st.set_page_config(
+     page_title="Speech-to-chat",
+     page_icon='🌊'
+ )
+
+ def create_audio_stream(audio):
+     return io.BytesIO(audio.export(format="wav").read())
+
+ def add_query_parameter(link, params):
+     url_parts = list(urlparse.urlparse(link))
+     query = dict(urlparse.parse_qsl(url_parts[4]))
+     query.update(params)
+
+     url_parts[4] = urlencode(query)
+
+     return urlparse.urlunparse(url_parts)
+
+ def youtube_video_id(value):
+     """
+     Extract the YouTube video id from a URL. Examples:
+     - http://youtu.be/SA2iWivDJiE
+     - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
+     - http://www.youtube.com/embed/SA2iWivDJiE
+     - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
+     """
+     query = urlparse.urlparse(value)
+     if query.hostname == 'youtu.be':
+         return query.path[1:]
+     if query.hostname in ('www.youtube.com', 'youtube.com'):
+         if query.path == '/watch':
+             p = urlparse.parse_qs(query.query)
+             return p['v'][0]
+         if query.path[:7] == '/embed/':
+             return query.path.split('/')[2]
+         if query.path[:3] == '/v/':
+             return query.path.split('/')[2]
+     # no recognizable video id
+     return None
+
+
+ def load_rttm_file(rttm_path):
+     return load_rttm(rttm_path)['stream']
+
+
+ def load_audio(uploaded_audio):
+     return AudioSegment.from_file(uploaded_audio)
+
+
+ # Set your OpenAI and Hugging Face API keys
+ openai.api_key = os.getenv('openai')
+ hf_api_key = os.getenv('hf')
+
+ st.title("Speech Diarization and Speech-to-Text with PyAnnote and Whisper")
+ reddit_thread = 'https://www.reddit.com/r/dataisbeautiful/comments/17413bq/oc_speech_diarization_app_that_transcribes_audio'
+ with st.expander('About', expanded=True):
+     st.markdown(f'''
+     Given an audio file this app will
+     - [x] 1. Identify and diarize the speakers using `pyannote` [HuggingFace Speaker Diarization api](https://huggingface.co/pyannote/speaker-diarization-3.0)
+     - [x] 2. Transcribe the audio and attribute it to speakers using the [OpenAI Whisper API](https://platform.openai.com/docs/guides/speech-to-text/quickstart)
+     - [ ] 3. Set up an LLM chat with the transcript loaded into its knowledge database, so that a user can "talk" to the transcript of the audio file (WIP)
+
+     This version will only process up to the first 6 minutes of an audio file due to the limited resources of Streamlit.io apps.
+     A local version with access to a GPU can process 1 hour of audio in 1 to 5 minutes.
+     If you would like to use this app at scale, reach out directly by creating an issue on GitHub [🤖](https://github.com/KobaKhit/speech-to-text-app/issues)!
+
+     As a rule of thumb, this Streamlit.io hosted app takes half the duration of the audio to finish processing, e.g. a 6 minute YouTube video takes about 3 minutes to diarize.
+
+     [github repo](https://github.com/KobaKhit/speech-to-text-app)
+     ''')
+
+
+ option = st.radio("Select source:", ["Upload an audio file", "Use YouTube link", "See Example"], index=2)
+
+ # Upload audio file
+ if option == "Upload an audio file":
+     uploaded_audio = st.file_uploader("Upload an audio file (MP3 or WAV)", type=["mp3", "wav", "mp4"])
+     with st.expander('Optional Parameters'):
+         rttm = st.file_uploader("Upload .rttm if you already have one", type=["rttm"])
+         transcript_file = st.file_uploader("Upload transcript json", type=["json"])
+         youtube_link = st.text_input('Youtube link of the audio sample')
+
+     if uploaded_audio is not None:
+         st.audio(uploaded_audio, format="audio/wav", start_time=0)
+         audio_name = uploaded_audio.name
+         audio = load_audio(uploaded_audio)
+
+         # sample_rate = st.number_input("Enter the sample rate of the audio", min_value=8000, max_value=48000)
+         # audio = audio.set_frame_rate(sample_rate)
+
+ # use youtube link
+ elif option == "Use YouTube link":
+
+     youtube_link_raw = st.text_input("Enter the YouTube video URL:")
+     youtube_link = f'https://youtu.be/{youtube_video_id(youtube_link_raw)}'
+
+     with st.expander('Optional Parameters'):
+         rttm = st.file_uploader("Upload .rttm if you already have one", type=["rttm"])
+         transcript_file = st.file_uploader("Upload transcript json", type=["json"])
+     if youtube_link_raw:
+         st.write(f"Fetching audio from YouTube: {youtube_link}")
+         try:
+             yt = YouTube(youtube_link)
+             audio_stream = yt.streams.filter(only_audio=True).first()
+             audio_name = audio_stream.default_filename
+             st.write(f"Downloaded {audio_name}")
+         except pytube.exceptions.AgeRestrictedError:
+             st.error('Age restricted videos cannot be processed.')
+             st.stop()
+
+         try:
+             os.remove('sample.mp4')
+         except OSError:
+             pass
+         audio_file = audio_stream.download(filename='sample.mp4')
+         time.sleep(2)
+         audio = load_audio('sample.mp4')
+         st.audio(create_audio_stream(audio), format="audio/mp4", start_time=0)
+ elif option == 'See Example':
+     youtube_link = 'https://www.youtube.com/watch?v=TamrOZX9bu8'
+     audio_name = 'Stephen A. Smith has JOKES with Shannon Sharpe'
+     st.write(f'Loaded audio file from {youtube_link} - Stephen A. Smith has JOKES with Shannon Sharpe 👏😂')
+     if os.path.isfile('example/steve a smith jokes.mp4'):
+         audio = load_audio('example/steve a smith jokes.mp4')
+     else:
+         yt = YouTube(youtube_link)
+         audio_stream = yt.streams.filter(only_audio=True).first()
+         audio_file = audio_stream.download(filename='sample.mp4')
+         time.sleep(2)
+         audio = load_audio('sample.mp4')
+
+     if os.path.isfile("example/steve a smith jokes.rttm"):
+         rttm = "example/steve a smith jokes.rttm"
+     if os.path.isfile('example/steve a smith jokes.json'):
+         transcript_file = 'example/steve a smith jokes.json'
+
+     st.audio(create_audio_stream(audio), format="audio/mp4", start_time=0)
+
+
+ # Diarize
+ if "audio" in locals():
+     st.write('Performing Diarization...')
+     duration = audio.duration_seconds
+     if duration > 360:
+         st.info('Only processing the first 6 minutes of the audio due to Streamlit.io resource limits.')
+         audio = audio[:360 * 1000]
+         duration = audio.duration_seconds
+
+     # Perform diarization with PyAnnote
+     pipeline = Pipeline.from_pretrained(
+         "pyannote/speaker-diarization-3.0", use_auth_token=hf_api_key)
+     if torch.cuda.device_count() > 0:  # use gpu if available
+         pipeline.to(torch.device('cuda'))
+
+     # run the pipeline on the audio, or reuse a provided .rttm
+     if 'rttm' in locals() and rttm is not None:
+         st.write(f'Loading {rttm}')
+         diarization = load_rttm_file(rttm)
+     else:
+         # with ProgressHook() as hook:
+         #     diarization = pipeline(audio_, hook=hook)
+         audio_ = create_audio_stream(audio)
+         diarization = pipeline(audio_)
+         # dump the diarization output to disk using RTTM format
+         with open(f'{audio_name.split(".")[0]}.rttm', "w") as f:
+             diarization.write_rttm(f)
+
+     # Display the diarization results
+     st.write("Diarization Results:")
+
+     annotation = Annotation()
+     sp_chunks = []
+     n_tracks = len([a for a in diarization.itertracks(yield_label=True)])
+     my_bar = st.progress(0, text=f"Processing 1/{n_tracks}...")
+     counter = 0
+     for turn, _, speaker in diarization.itertracks(yield_label=True):
+         annotation[turn] = speaker
+         counter += 1
+         my_bar.progress(counter / n_tracks, text=f"Processing {counter}/{n_tracks}...")
+         temp = {'speaker': speaker,
+                 'start': turn.start, 'end': turn.end, 'duration': turn.end - turn.start,
+                 'audio': audio[turn.start * 1000:turn.end * 1000]}
+         if 'transcript_file' in locals() and transcript_file is None:
+             temp['audio_stream'] = create_audio_stream(audio[turn.start * 1000:turn.end * 1000])
+         sp_chunks.append(temp)
+
+     # plot the speaker timeline
+     notebook.crop = Segment(-1, duration + 1)
+     figure, ax = plt.subplots(figsize=(10, 3))
+     notebook.plot_annotation(annotation, ax=ax, time=True, legend=True)
+     figure.tight_layout()
+     st.pyplot(figure)
+
+     st.write('Speakers and Audio Samples')
+     with st.expander('Samples', expanded=True):
+         for speaker in set(s['speaker'] for s in sp_chunks):
+             # longest turn for this speaker serves as the audio sample
+             temp = max(filter(lambda d: d['speaker'] == speaker, sp_chunks), key=lambda x: x['duration'])
+             speak_time = sum(c['duration'] for c in filter(lambda d: d['speaker'] == speaker, sp_chunks))
+             rate = 100 * min((speak_time, duration)) / duration
+             speaker_summary = f"{temp['speaker']} ({round(rate)}% of video duration): start={temp['start']:.1f}s stop={temp['end']:.1f}s"
+             if youtube_link:
+                 speaker_summary += f" {add_query_parameter(youtube_link, {'t': str(int(temp['start']))})}"
+             st.write(speaker_summary)
+             st.audio(create_audio_stream(temp['audio']))
+
+     st.divider()
+     # Perform transcription with Whisper ASR
+     st.write('Transcribing using Whisper API (150 requests limit)...')
+     container = st.container()
+
+     limit = 150
+     my_bar = st.progress(0, text=f"Processing 1/{len(sp_chunks[:limit])}...")
+     with st.expander('Transcript', expanded=True):
+         if 'transcript_file' in locals() and transcript_file is not None:
+             with open(transcript_file, 'r') as f:
+                 sp_chunks_loaded = json.load(f)
+             for i, s in enumerate(sp_chunks_loaded):
+                 if s['transcript'] is not None:
+                     transcript_summary = f"{s['speaker']} start={float(s['start']):.1f}s end={float(s['end']):.1f}s: {s['transcript']}"
+                     if youtube_link:
+                         transcript_summary += f" {add_query_parameter(youtube_link, {'t': str(int(s['start']))})}"
+                     st.write(transcript_summary)
+                 my_bar.progress((i + 1) / len(sp_chunks_loaded), text=f"Processing {i + 1}/{len(sp_chunks_loaded)}...")
+
+             transcript_json = sp_chunks_loaded
+             transcript_path = 'example-transcript.json'
+
+         else:
+             sp_chunks_updated = []
+             for i, s in enumerate(sp_chunks[:limit]):
+                 if s['duration'] > 0.1:
+                     audio_path = s['audio'].export('temp.wav', format='wav')
+                     try:
+                         transcript = openai.Audio.transcribe("whisper-1", audio_path)['text']
+                     except Exception:
+                         transcript = ''
+
+                     if transcript:
+                         s['transcript'] = transcript
+                         transcript_summary = f"{s['speaker']} start={s['start']:.1f}s end={s['end']:.1f}s : {s['transcript']}"
+                         if youtube_link:
+                             transcript_summary += f" {add_query_parameter(youtube_link, {'t': str(int(s['start']))})}"
+                         sp_chunks_updated.append({'speaker': s['speaker'],
+                                                   'start': s['start'], 'end': s['end'],
+                                                   'duration': s['duration'], 'transcript': transcript})
+                         st.write(transcript_summary)
+                 my_bar.progress((i + 1) / len(sp_chunks[:limit]), text=f"Processing {i + 1}/{len(sp_chunks[:limit])}...")
+
+             transcript_json = [dict((k, d[k]) for k in ['speaker', 'start', 'end', 'duration', 'transcript'] if k in d) for d in sp_chunks_updated]
+             transcript_path = f'{audio_name.split(".")[0]}-transcript.json'
+
+         with open(transcript_path, 'w') as f:
+             json.dump(transcript_json, f)
+
+         with container:
+             st.info('Completed transcribing')
+
+         @st.cache_data
+         def convert_df(string):
+             # IMPORTANT: Cache the conversion to prevent computation on every rerun
+             return string.encode('utf-8')
+
+         transcript_json_download = convert_df(json.dumps(transcript_json))
+
+         c1_b, c2_b = st.columns((1, 2))
+         with c1_b:
+             ste.download_button(
+                 "Download transcript as json",
+                 transcript_json_download,
+                 transcript_path,
+             )
+
+         # build a csv by hand, quoting values that contain commas
+         header = ','.join(transcript_json[0].keys()) + '\n'
+         for s in transcript_json:
+             header += ','.join([str(e) if ',' not in str(e) else '"' + str(e) + '"' for e in s.values()]) + '\n'
+
+         transcript_csv_download = convert_df(header)
+         with c2_b:
+             ste.download_button(
+                 "Download transcript as csv",
+                 transcript_csv_download,
+                 f'{audio_name.split(".")[0]}-transcript.csv'
+             )
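One design note on the download section: app.py assembles the CSV by hand, quoting only values that contain commas, which would break on values containing double quotes or newlines. A sketch of an equivalent builder that delegates quoting to Python's standard csv module (an alternative, not what this commit ships):

```python
import csv
import io

def transcript_to_csv_bytes(transcript_json):
    """Serialize a list of transcript dicts to CSV bytes, letting csv handle quoting."""
    buf = io.StringIO()
    writer = csv.DictWriter(buf, fieldnames=['speaker', 'start', 'end', 'duration', 'transcript'])
    writer.writeheader()
    writer.writerows(transcript_json)
    return buf.getvalue().encode('utf-8')
```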
example/steve a smith jokes.json ADDED
@@ -0,0 +1 @@
+ [{"speaker": "SPEAKER_02", "start": 1.0101867572156198, "end": 17.173174872665534, "duration": 16.162988115449913, "transcript": "You can call it day two but it's really day one because the full two-hour show with me and the man Shannon Sharpless. I know it was Hiccup last year, I know it was Hiccup yesterday Shannon, I know it was Hiccup yesterday. Alright, I mean, cause you called me something other than Stephen A, but I got a solution to the problem. "}, {"speaker": "SPEAKER_01", "start": 12.589134125636674, "end": 13.607809847198643, "duration": 1.0186757215619693, "transcript": "I don't think I'm going to stay forever."}, {"speaker": "SPEAKER_02", "start": 18.3276740237691, "end": 19.41426146010187, "duration": 1.0865874363327706, "transcript": "Ah, there we go!"}, {"speaker": "SPEAKER_00", "start": 19.41426146010187, "end": 19.92359932088285, "duration": 0.5093378607809811, "transcript": "Yo!"}, {"speaker": "SPEAKER_02", "start": 19.92359932088285, "end": 20.04244482173175, "duration": 0.11884550084889867, "transcript": "You"}, {"speaker": "SPEAKER_00", "start": 20.449915110356535, "end": 25.679117147707984, "duration": 5.229202037351449, "transcript": "What's up, y'all? My name is Steven A. What's up, baby? And you are Molly Carol."}, {"speaker": "SPEAKER_03", "start": 25.679117147707984, "end": 28.599320882852297, "duration": 2.9202037351443124, "transcript": "Yes, you said it right, day two."}, {"speaker": "SPEAKER_00", "start": 26.35823429541596, "end": 26.56196943972835, "duration": 0.2037351443123896, "transcript": "Thank you."}, {"speaker": "SPEAKER_00", "start": 28.3616298811545, "end": 30.11035653650255, "duration": 1.748726655348051, "transcript": "It only took me seven years. You know what I..."}, {"speaker": "SPEAKER_03", "start": 29.38030560271647, "end": 34.03225806451613, "duration": 4.6519524617996595, "transcript": "Stephen A is still learning. Karam, good job. It rhymes with harem. Karam. "}, {"speaker": "SPEAKER_01", "start": 31.876061120543298, "end": 32.826825127334466, "duration": 0.950764006791168, "transcript": "Good job. It rhymes with."}, {"speaker": "SPEAKER_01", "start": 33.09847198641766, "end": 40.36502546689304, "duration": 7.26655348047538, "transcript": "She said it rhymes with harem. We just called you cute. She had to tell the world it rhymes with harem? I'm just wondering."}, {"speaker": "SPEAKER_03", "start": 37.68251273344652, "end": 38.1578947368421, "duration": 0.47538200339558045, "transcript": "because she had."}, {"speaker": "SPEAKER_03", "start": 40.36502546689304, "end": 41.34974533106961, "duration": 0.9847198641765758, "transcript": "That's what it rhymes with."}, {"speaker": "SPEAKER_02", "start": 42.74193548387097, "end": 49.278438030560274, "duration": 6.536502546689306, "transcript": "Thanks for watching ESPN on YouTube. For live streaming sports and premium content, subscribe to ESPN Plus."}]
example/steve a smith jokes.mp4 ADDED
Binary file (302 kB).
example/steve a smith jokes.rttm ADDED
@@ -0,0 +1,34 @@
+ SPEAKER stream 1 1.010 16.163 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 12.589 1.019 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 18.328 1.087 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 19.414 0.509 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 19.924 0.119 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 20.093 0.068 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 20.331 0.119 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 20.450 5.229 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 22.759 0.187 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 22.997 0.102 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 25.679 2.920 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 26.358 0.204 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 28.362 1.749 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 29.380 4.652 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 30.110 0.051 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 30.161 0.034 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 30.195 0.034 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 30.229 0.034 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 30.263 0.017 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 30.280 0.017 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 31.146 0.340 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 31.486 0.119 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 31.825 0.051 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 31.876 0.951 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 33.098 7.267 <NA> <NA> SPEAKER_01 <NA> <NA>
+ SPEAKER stream 1 35.458 0.017 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 35.475 0.051 <NA> <NA> SPEAKER_00 <NA> <NA>
+ SPEAKER stream 1 35.526 0.051 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 36.545 0.085 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 37.632 0.051 <NA> <NA> SPEAKER_02 <NA> <NA>
+ SPEAKER stream 1 37.683 0.475 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 40.229 0.085 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 40.365 0.985 <NA> <NA> SPEAKER_03 <NA> <NA>
+ SPEAKER stream 1 42.742 6.537 <NA> <NA> SPEAKER_02 <NA> <NA>
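RTTM is a space-delimited format: `SPEAKER <file-id> <channel> <onset> <duration> ... <speaker-label> ...`, with `<NA>` for unused fields. The file id here is `stream`, which is why `load_rttm_file` in app.py indexes the result with `['stream']`. A sketch of reading this example back with pyannote, as the app does:

```python
from pyannote.database.util import load_rttm

# load_rttm returns a {file-id: Annotation} mapping
annotation = load_rttm('example/steve a smith jokes.rttm')['stream']

for segment, _, speaker in annotation.itertracks(yield_label=True):
    print(f"{speaker}: {segment.start:.3f}s -> {segment.end:.3f}s")
```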
packages.txt ADDED
@@ -0,0 +1 @@
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ openai==0.28.1
+ openai-whisper==20230918
+ pydub==0.25.1
+ pytube==15.0.0
+ streamlit==1.27.2
+ streamlit-ext==0.1.8
+ ffmpeg==1.4
+ pyannote.audio==3.0.1
+ pyannote.core==5.0.0
+