justus-tobias committed
Commit e290816 · 1 Parent(s): 68cdf36

spectogram has correct length, filename displayed, average beat

Files changed (1)
  1. app.py +92 -36
app.py CHANGED
@@ -15,9 +15,9 @@ random.shuffle(all_pairs)
 example_pairs = [list(pair) for pair in all_pairs[:25]]

 # GENERAL HELPER FUNCTIONS
-def getaudiodata(audio:gr.Audio)->tuple[int,np.ndarray]:
-    # Extract audio data and sample rate
-    sr, audiodata = audio
+def getaudiodata(filepath)->tuple[int,np.ndarray]:
+
+    audiodata, sr = librosa.load(filepath, sr=None)

     # Ensure audiodata is a numpy array
     if not isinstance(audiodata, np.ndarray):
@@ -70,8 +70,39 @@ def getHRV(beattimes: np.ndarray) -> np.ndarray:

     return instantaneous_hr

+def create_average_heartbeat(audiodata, sr):
+    # 1. Detect individual heartbeats
+    onset_env = librosa.onset.onset_strength(y=audiodata, sr=sr)
+    peaks, _ = find_peaks(onset_env, distance=sr//2) # Assume at least 0.5s between beats
+
+    # 2. Extract individual heartbeats
+    beat_length = sr # Assume 1 second for each beat
+    beats = []
+    for peak in peaks:
+        if peak + beat_length < len(audiodata):
+            beat = audiodata[peak:peak+beat_length]
+            beats.append(beat)
+
+    # 3. Align and average the beats
+    if beats:
+        avg_beat = np.mean(beats, axis=0)
+    else:
+        avg_beat = np.array([])
+
+    # 4. Create a Plotly figure of the average heartbeat
+    time = np.arange(len(avg_beat)) / sr
+    fig = go.Figure()
+    fig.add_trace(go.Scatter(x=time, y=avg_beat, mode='lines', name='Average Beat'))
+    fig.update_layout(
+        title='Average Heartbeat',
+        xaxis_title='Time (s)',
+        yaxis_title='Amplitude'
+    )
+
+    return fig, avg_beat
+
 # HELPER FUNCTIONS FOR SINGLE AUDIO ANALYSIS
-def plotCombined(audiodata, sr):
+def plotCombined(audiodata, sr, filename):
     # Get beat times
     tempo, beattimes = getBeats(audiodata, sr)

@@ -80,7 +111,7 @@ def plotCombined(audiodata, sr):
         subplot_titles=('Audio Waveform', 'Spectrogram', 'Heart Rate Variability'))

     # Time array for the full audio
-    time = (np.arange(0, len(audiodata)) / sr)*2
+    time = (np.arange(0, len(audiodata)) / sr) * 2

     # Waveform plot
     fig.add_trace(
@@ -105,23 +136,30 @@ def plotCombined(audiodata, sr):
     )

     # Spectrogram plot
-    D = librosa.stft(audiodata)
+    n_fft = 2048 # You can adjust this value
+    hop_length = n_fft // 4 # You can adjust this value
+
+    D = librosa.stft(audiodata, n_fft=n_fft, hop_length=hop_length)
     S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)
-    times = librosa.times_like(S_db)
-    freqs = librosa.fft_frequencies(sr=sr)
+
+    # Calculate the correct time array for the spectrogram
+    spec_times = librosa.times_like(S_db, sr=sr, hop_length=hop_length)
+
+    freqs = librosa.fft_frequencies(sr=sr, n_fft=n_fft)
     fig.add_trace(
-        go.Heatmap(z=S_db, x=times, y=freqs, colorscale='Viridis',
-                   zmin=S_db.min(), zmax=S_db.max(), colorbar=dict(title='Magnitude (dB)')),
+        go.Heatmap(z=S_db, x=spec_times, y=freqs, colorscale='Viridis',
+                   zmin=S_db.min(), zmax=S_db.max(), colorbar=dict(title='Magnitude (dB)')),
         row=2, col=1
     )
-
+
     # Update layout
     fig.update_layout(
         height=1000,
-        title_text="Audio Analysis with Heart Rate Variability",
+        title_text=filename,
         showlegend=False
     )
     fig.update_xaxes(title_text="Time (s)", row=2, col=1)
+    fig.update_xaxes(range=[0, len(audiodata)/sr], row=2, col=1)
     fig.update_yaxes(title_text="Amplitude", row=1, col=1)
     fig.update_yaxes(title_text="HRV", row=3, col=1)
     fig.update_yaxes(title_text="Frequency (Hz)", type="log", row=2, col=1)
@@ -167,7 +205,11 @@ def plotbeatscatter(tempo, beattimes):

def analyze_single(audio:gr.Audio):
    # Extract audio data and sample rate
-    sr, audiodata = getaudiodata(audio)
+    filepath = audio
+    filename = filepath.split("/")[-1]
+
+
+    sr, audiodata = getaudiodata(filepath)


    # Now you have:
@@ -187,11 +229,21 @@ def analyze_single(audio:gr.Audio):
    # print(f"Mean RMS Energy: {np.mean(rms):.4f}")

    tempo, beattimes = getBeats(audiodata, sr)
-    spectogram_wave = plotCombined(audiodata, sr)
+    spectogram_wave = plotCombined(audiodata, sr, filename)
    beats_histogram = plotbeatscatter(tempo[0], beattimes)

+    # Add the new average heartbeat analysis
+    avg_beat_plot, avg_beat = create_average_heartbeat(audiodata, sr)
+
+    # Calculate some statistics about the average beat
+    avg_beat_duration = len(avg_beat) / sr
+    avg_beat_energy = np.sum(np.square(avg_beat))
+
    # Return your analysis results
    results = f"""
+    Average Heartbeat Analysis:
+    - Duration: {avg_beat_duration:.3f} seconds
+    - Energy: {avg_beat_energy:.3f}
    - Audio length: {len(audiodata) / sr:.2f} seconds
    - Sample rate: {sr} Hz
    - Mean Zero Crossing Rate: {np.mean(zcr):.4f}
@@ -201,7 +253,7 @@ def analyze_single(audio:gr.Audio):
    - Beat durations: {np.diff(beattimes)}
    - Mean Beat Duration: {np.mean(np.diff(beattimes)):.4f}
    """
-    return results, spectogram_wave, beats_histogram
+    return results, spectogram_wave, avg_beat_plot, beats_histogram
#-----------------------------------------------
#-----------------------------------------------

@@ -297,6 +349,7 @@ with gr.Blocks() as app:
    with gr.Tab("Single Audio"):

        audiofile = gr.Audio(
+            type="filepath",
            label="Audio of a Heartbeat",
            sources="upload")

@@ -304,9 +357,10 @@ with gr.Blocks() as app:

        results = gr.Markdown()
        spectogram_wave = gr.Plot()
+        avg_beat_plot = gr.Plot()
        beats_histogram = gr.Plot()

-        analyzebtn.click(analyze_single, audiofile, [results, spectogram_wave, beats_histogram])
+        analyzebtn.click(analyze_single, audiofile, [results, spectogram_wave, avg_beat_plot, beats_histogram])

        gr.Examples(
            examples=example_files,
@@ -321,32 +375,34 @@ with gr.Blocks() as app:
            - classify Beat's into S1 and S2
            - synthesise the mean Beat S1 & S2""")

-    with gr.Tab("Two Audios"):
+    # with gr.Tab("Two Audios"):

-        with gr.Row():
+    #     with gr.Row():

-            audioone = gr.Audio(
-                label="Audio of a Heartbeat",
-                sources="upload")
-            audiotwo = gr.Audio(
-                label="Audio of a Heartbeat",
-                sources="upload")
+    #         audioone = gr.Audio(
+    #             type="filepath",
+    #             label="Audio of a Heartbeat",
+    #             sources="upload")
+    #         audiotwo = gr.Audio(
+    #             type="filepath",
+    #             label="Audio of a Heartbeat",
+    #             sources="upload")

-        analyzebtn2 = gr.Button("analyze & compare")
+    #     analyzebtn2 = gr.Button("analyze & compare")

-        with gr.Accordion("Results",open=False):
-            results2 = gr.Markdown()
-            spectogram_wave2 = gr.Plot()
+    #     with gr.Accordion("Results",open=False):
+    #         results2 = gr.Markdown()
+    #         spectogram_wave2 = gr.Plot()

-        analyzebtn2.click(analyze_double, inputs=[audioone,audiotwo], outputs=spectogram_wave2)
+    #     analyzebtn2.click(analyze_double, inputs=[audioone,audiotwo], outputs=spectogram_wave2)

-        gr.Examples(
-            examples=example_pairs,
-            inputs=[audioone, audiotwo],
-            outputs=spectogram_wave2,
-            fn=analyze_double,
-            cache_examples=False
-        )
+    #     gr.Examples(
+    #         examples=example_pairs,
+    #         inputs=[audioone, audiotwo],
+    #         outputs=spectogram_wave2,
+    #         fn=analyze_double,
+    #         cache_examples=False
+    #     )


app.launch()
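
The "spectogram has correct length" part of the diff comes down to computing the STFT with an explicit n_fft/hop_length and then deriving the time axis from that same hop_length and the file's real sample rate, instead of librosa.times_like(S_db) with its defaults (sr=22050, hop_length=512). Below is a minimal standalone sketch of that relationship, not part of the repo: the file path is a placeholder and the parameter values simply mirror the commit.

import numpy as np
import librosa

# Load at the file's native sample rate, as the new getaudiodata() does.
audiodata, sr = librosa.load("heartbeat.wav", sr=None)   # placeholder path

n_fft = 2048
hop_length = n_fft // 4   # 512 samples between STFT frames

D = librosa.stft(audiodata, n_fft=n_fft, hop_length=hop_length)
S_db = librosa.amplitude_to_db(np.abs(D), ref=np.max)

# Each STFT column is hop_length samples apart, so frame i sits at i * hop_length / sr seconds.
spec_times = librosa.times_like(S_db, sr=sr, hop_length=hop_length)
manual_times = np.arange(S_db.shape[1]) * hop_length / sr
assert np.allclose(spec_times, manual_times)

# The last frame now lines up with the audio duration (to within one hop),
# which is what keeps the spectrogram x-axis the same length as the waveform.
print(f"audio: {len(audiodata) / sr:.2f} s, last spectrogram frame: {spec_times[-1]:.2f} s")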
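
The new create_average_heartbeat() is shown in full in the diff above. As a standalone sketch of the same beat-averaging idea outside Gradio (not the repo's function), the snippet below makes one assumption of its own: librosa.onset.onset_strength returns one value per STFT frame (hop of 512 samples by default), so the detected peak positions are converted from frames to samples with librosa.frames_to_samples before slicing the waveform, and the 0.5 s minimum spacing is expressed in frames. The file path is a placeholder.

import numpy as np
import librosa
import plotly.graph_objects as go
from scipy.signal import find_peaks

audiodata, sr = librosa.load("heartbeat.wav", sr=None)   # placeholder path

# Onset strength is sampled once per STFT frame (hop_length=512 by default).
onset_env = librosa.onset.onset_strength(y=audiodata, sr=sr)
min_gap_frames = max(1, int(0.5 * sr / 512))              # at least ~0.5 s between beats
frame_peaks, _ = find_peaks(onset_env, distance=min_gap_frames)
sample_peaks = librosa.frames_to_samples(frame_peaks)     # frame index -> sample index

beat_length = sr                                          # one second per beat, as in the commit
beats = [audiodata[p:p + beat_length] for p in sample_peaks if p + beat_length < len(audiodata)]
avg_beat = np.mean(beats, axis=0) if beats else np.array([])

fig = go.Figure(go.Scatter(x=np.arange(len(avg_beat)) / sr, y=avg_beat, mode="lines"))
fig.update_layout(title="Average Heartbeat", xaxis_title="Time (s)", yaxis_title="Amplitude")
fig.show()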