Update app.py
app.py CHANGED
@@ -160,7 +160,7 @@ def resample_waveform(waveform, original_sample_rate, target_sample_rate):
 def split_audio(waveform, sample_rate):
     segment_samples = segment_duration * sample_rate
     total_samples = waveform.size(0)
-
+
     segments = []
     # If the audio is shorter than the segment duration, just use the entire audio
     if total_samples <= segment_samples:
@@ -172,16 +172,41 @@ def split_audio(waveform, sample_rate):
         segment = waveform[start:end]
         segments.append(segment)
 
-    # Ensure we have at least one segment
-    if len(segments) == 0:
-        segments.append(waveform)
-
+    # Ensure we have at least one segment with a minimum length
+    if len(segments) == 0 or all(len(segment) < 100 for segment in segments):
+        # Create a padded segment if audio is too short
+        padded_segment = torch.zeros(segment_samples)
+        if total_samples > 0:
+            padded_segment[:total_samples] = waveform
+        segments = [padded_segment]
+
     return segments
 
 # def split_audio(waveform, sample_rate):
 #     segment_samples = segment_duration * sample_rate
 #     total_samples = waveform.size(0)
 
+#     segments = []
+#     # If the audio is shorter than the segment duration, just use the entire audio
+#     if total_samples <= segment_samples:
+#         segments.append(waveform)
+#     else:
+#         # Split the audio into segments of the specified duration
+#         for start in range(0, total_samples, segment_samples):
+#             end = min(start + segment_samples, total_samples)
+#             segment = waveform[start:end]
+#             segments.append(segment)
+
+#     # Ensure we have at least one segment
+#     if len(segments) == 0:
+#         segments.append(waveform)
+
+#     return segments
+
+# def split_audio(waveform, sample_rate):
+#     segment_samples = segment_duration * sample_rate
+#     total_samples = waveform.size(0)
+
 #     segments = []
 #     for start in range(0, total_samples, segment_samples):
 #         end = start + segment_samples
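
For context, a minimal self-contained sketch of how the updated function behaves is given below. The loop body hidden between the two hunks is filled in from the commented-out copy shown in the diff, and segment_duration (whose value does not appear in this change) is set to 10 seconds purely for illustration.

import torch

segment_duration = 10  # assumed value (seconds); the real constant is defined elsewhere in app.py

def split_audio(waveform, sample_rate):
    segment_samples = segment_duration * sample_rate
    total_samples = waveform.size(0)

    segments = []
    # If the audio is shorter than the segment duration, just use the entire audio
    if total_samples <= segment_samples:
        segments.append(waveform)
    else:
        # Split the audio into segments of the specified duration
        for start in range(0, total_samples, segment_samples):
            end = min(start + segment_samples, total_samples)
            segments.append(waveform[start:end])

    # Ensure we have at least one segment with a minimum length
    if len(segments) == 0 or all(len(segment) < 100 for segment in segments):
        # Create a padded segment if audio is too short
        padded_segment = torch.zeros(segment_samples)
        if total_samples > 0:
            padded_segment[:total_samples] = waveform
        segments = [padded_segment]

    return segments

# Example: a 50-sample clip at 16 kHz trips the new 100-sample minimum-length check,
# so a single zero-padded segment of segment_duration seconds is returned.
clip = torch.rand(50)
out = split_audio(clip, 16000)
print(len(out), out[0].shape)  # 1 torch.Size([160000])

Under the previous fallback, the same 50-sample clip would have been returned unpadded; the new fallback zero-pads it to a full segment.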