Neal Caren committed on
Commit a507bd6 (1 parent: 72a5c50)

Added some comments and medium model option.

Files changed (1)
  1. app.py +30 -13
app.py CHANGED
@@ -10,11 +10,14 @@ import tempfile
 
 
 def create_download_link(val, filename, label):
+    '''Hack to have a stable download link in Streamlit'''
     b64 = base64.b64encode(val)
     return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="{filename}">{label}</a>'
 
 
 def segment(nu_speakers):
+    '''Segment the audio using simple_diarizer.
+    Defaults to the speechbrain ECAPA-TDNN embeddings.'''
 
     diar = Diarizer(embed_model='ecapa',cluster_method='sc')
     segments = diar.diarize(temp_file, num_speakers=nu_speakers)
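The new segment() docstring names the embedding and clustering choices, but the unchanged middle of the function (everything between diar.diarize() and the sdf that is returned) sits outside this hunk. A minimal sketch of what that bridge presumably looks like, assuming diarize() returns a list of dicts with 'start', 'end', and 'label' keys and using a hypothetical helper name; only the surrounding lines appear in the diff:

import pandas as pd

def segments_to_frame(segments):
    # One row per diarized segment; 'label' holds the raw cluster id.
    sdf = pd.DataFrame(segments)
    # Relabel clusters as Speaker 1..N in order of appearance (hypothetical speaker_d).
    speaker_d = {label: i + 1 for i, label in enumerate(sdf['label'].unique())}
    sdf['speaker'] = sdf['label'].replace(speaker_d)
    return sdf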
@@ -27,56 +30,63 @@ def segment(nu_speakers):
 
     sdf['speaker'] = sdf['label'].replace(speaker_d)
     return sdf
+def monotize(uploaded):
+    '''Convert the uploaded file to a mono 16 kHz audio file.'''
+    cmd = f"ffmpeg -y -i {uploaded} -acodec pcm_s16le -ar 16000 -ac 1 {temp_file}"
+    subprocess.Popen(cmd, shell=True).wait()
 
 def audio_to_df(uploaded):
-    monotize(uploaded)
+    '''Turn the uploaded file into a segmented dataframe.'''
+    #monotize(uploaded)
     model = whisper.load_model(model_size)
     result = model.transcribe(temp_file,
-                              without_timestamps=False,
-                              task = task)
+                              without_timestamps=False,
+                              task = task)
     tdf = pd.DataFrame(result['segments'])
     return tdf
 
-def monotize(uploaded):
-    cmd = f"ffmpeg -y -i {uploaded} -acodec pcm_s16le -ar 16000 -ac 1 {temp_file}"
-    subprocess.Popen(cmd, shell=True).wait()
+
 
 def add_preface(row):
+    '''Add a speaker prefix to each transcript line during transcribe().'''
     text = row['text'].replace('\n','')
     speaker = row['speaker']
     return f'Speaker {speaker}: {text}'
 
 def transcribe(uploaded, nu_speakers):
+    # Convert file to mono
     with st.spinner(text="Converting file..."):
         monotize('temp_audio')
 
+    # Make audio available to play in UI
     audio_file = open(temp_file, 'rb')
     audio_bytes = audio_file.read()
     st.audio(temp_file, format='audio/wav')
 
+    # Transcribe the file
     with st.spinner(text=f"Transcribing using {model_size} model..."):
         tdf = audio_to_df(uploaded)
+    # Segment the file
     with st.spinner(text="Segmenting..."):
         sdf = segment(nu_speakers)
 
-    ns_list = sdf[['start','speaker']].to_dict(orient='records')
-
     # Find the nearest transcript line to the start of each speaker
+    ns_list = sdf[['start','speaker']].to_dict(orient='records')
     for row in ns_list:
         input = row['start']
         id = tdf.iloc[(tdf['start']-input).abs().argsort()[:1]]['id'].values[0]
         tdf.loc[tdf['id'] ==id, 'speaker'] = row['speaker']
-
     tdf['speaker'].fillna(method = 'ffill', inplace = True)
     tdf['speaker'].fillna(method = 'bfill', inplace = True)
-
     tdf['n1'] = tdf['speaker'] != tdf['speaker'].shift(1)
     tdf['speach'] = tdf['n1'].cumsum()
-    binned_df = tdf.groupby(['speach', 'speaker'])['text'].apply('\n'.join).reset_index()
 
+    # Collapse the dataframe by speech turn.
+    binned_df = tdf.groupby(['speach', 'speaker'])['text'].apply('\n'.join).reset_index()
     binned_df['speaker'] = binned_df['speaker'].astype(int)
     binned_df['output'] = binned_df.apply(add_preface, axis=1)
 
+    # Display the transcript and prepare for export
     lines = []
     for row in binned_df['output'].values:
        st.write(row)
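monotize() shells out to ffmpeg with shell=True and an f-string, so a filename containing spaces or quotes would break the command. A hedged alternative (not the committed code) that keeps the same ffmpeg flags but passes them as an argument list and checks the exit status, taking temp_file as a parameter instead of the module-level name the app uses:

import subprocess

def monotize(uploaded, temp_file):
    # Same conversion: overwrite output, 16-bit PCM, 16 kHz, mono.
    subprocess.run(
        ['ffmpeg', '-y', '-i', uploaded,
         '-acodec', 'pcm_s16le', '-ar', '16000', '-ac', '1', temp_file],
        check=True,  # raise CalledProcessError instead of failing silently
    )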
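The block under "# Find the nearest transcript line..." is where the Whisper output (pd.DataFrame(result['segments']), whose rows carry id, start, end, and text fields) meets the diarization: each diarized speaker start is snapped to the nearest transcript segment, unlabelled segments inherit a speaker by forward/backward fill, and a cumulative sum over speaker changes numbers the turns. A toy, self-contained illustration of the same idea, not the app's data:

import pandas as pd

tdf = pd.DataFrame({'id': [0, 1, 2, 3],
                    'start': [0.0, 4.2, 9.8, 15.1],
                    'text': ['Hi.', 'Hello there.', 'How are you?', 'Fine, thanks.']})
sdf = pd.DataFrame({'start': [0.1, 9.5], 'speaker': [1, 2]})

# Snap each speaker start to the nearest transcript segment.
for row in sdf.to_dict(orient='records'):
    nearest = (tdf['start'] - row['start']).abs().idxmin()
    tdf.loc[nearest, 'speaker'] = row['speaker']

tdf['speaker'] = tdf['speaker'].ffill().bfill()   # unlabelled rows inherit a speaker
tdf['turn'] = (tdf['speaker'] != tdf['speaker'].shift(1)).cumsum()
print(tdf.groupby(['turn', 'speaker'])['text'].apply(' '.join))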
@@ -106,7 +116,7 @@ uploaded = form.file_uploader("Choose a file")
 nu_speakers = form.slider('Number of speakers in recording:', min_value=1, max_value=8, value=2, step=1)
 models = form.selectbox(
     'Which Whisper model?',
-    ('Tiny (fast)', 'Base (good)', 'Small (great but slow)'), index=1)
+    ('Tiny (fast)', 'Base (good)', 'Small (great but slow)', 'Medium (greater but slower)'), index=1)
 translate = form.checkbox('Translate to English?')
 submit = form.form_submit_button("Transcribe!")
 
@@ -118,6 +128,8 @@ if submit:
         model_size ='base'
     elif models == 'Small (great but slow)':
         model_size = 'small'
+    elif models == 'Medium (greater but slower)':
+        model_size = 'medium'
 
     if translate == True:
         task = 'translate'
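With a fourth model option, the growing elif chain could also be a simple lookup keyed on the selectbox labels. A sketch; the 'Tiny (fast)' branch is not visible in this hunk, so its mapping to 'tiny' is assumed:

MODEL_SIZES = {'Tiny (fast)': 'tiny',          # assumed; branch not shown in this hunk
               'Base (good)': 'base',
               'Small (great but slow)': 'small',
               'Medium (greater but slower)': 'medium'}
model_size = MODEL_SIZES[models]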
@@ -131,13 +143,18 @@ if submit:
     bytes_data = uploaded.getvalue()
     with open('temp_audio', 'wb') as outfile:
         outfile.write(bytes_data)
+
+
+    # Transcribe/translate and segment
     transcript = transcribe('temp_audio', nu_speakers)
 
-    csv = transcript['df'].to_csv( float_format='%.2f', index=False).encode('utf-8')
+    # Prepare text file for export.
     text = '\n'.join(transcript['text']).encode('utf-8')
     download_url = create_download_link(text, 'transcript.txt', 'Download transcript as plain text.')
     st.markdown(download_url, unsafe_allow_html=True)
 
+    # Prepare CSV file for export.
+    csv = transcript['df'].to_csv( float_format='%.2f', index=False).encode('utf-8')
     download_url = create_download_link(csv, 'transcript.csv', 'Download transcript as CSV (with time codes)')
     st.markdown(download_url, unsafe_allow_html=True)
     tmp_dir.cleanup()
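create_download_link() works around Streamlit reruns with a base64 data URI; recent Streamlit releases also provide st.download_button, which would cover the same exports without raw HTML. A sketch using the same encoded text and csv bytes prepared above, as an alternative rather than the committed approach:

import streamlit as st

# 'text' and 'csv' are the UTF-8 encoded bytes built in the hunk above.
st.download_button('Download transcript as plain text.', data=text,
                   file_name='transcript.txt', mime='text/plain')
st.download_button('Download transcript as CSV (with time codes)', data=csv,
                   file_name='transcript.csv', mime='text/csv')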
 