from abc import ABC, abstractmethod
from collections import Counter
from typing import Any, Iterator, List, Dict

from pprint import pprint

# Workaround for https://github.com/tensorflow/tensorflow/issues/48797
try:
    import tensorflow as tf
except ModuleNotFoundError:
    # TensorFlow is only imported for the workaround, so ignore it if not installed
    pass

import torch

import ffmpeg
import numpy as np

from src.utils import format_timestamp

# Defaults for Silero
# TODO: Make these configurable?

SPEECH_THRESHOLD = 0.3
MAX_SILENT_PERIOD = 10 # seconds
MAX_MERGE_SIZE = 150 # Do not create segments larger than 2.5 minutes

# Segment padding is disabled for now
SEGMENT_PADDING_LEFT = 0 # Start detected speech segments this many seconds early
SEGMENT_PADDING_RIGHT = 0 # End detected speech segments this many seconds late

# Whether to attempt to transcribe non-speech
TRANSCRIBE_NON_SPEECH = False

# Minimum size of segments to process
MIN_SEGMENT_DURATION = 1

VAD_MAX_PROCESSING_CHUNK = 60 * 60 # 60 minutes of audio

class AbstractTranscription(ABC):
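    """
    Base class for VAD-based transcription strategies. Subclasses detect the
    sections of the audio that should be transcribed, and this class pads and
    merges those sections before running Whisper on each of them.
    """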
    def __init__(self, segment_padding_left: float = None, segment_padding_right: float = None, max_silent_period: float = None, max_merge_size: float = None, transcribe_non_speech: bool = False):
        self.sampling_rate = 16000
        self.segment_padding_left = segment_padding_left
        self.segment_padding_right = segment_padding_right
        self.max_silent_period = max_silent_period
        self.max_merge_size = max_merge_size
        self.transcribe_non_speech = transcribe_non_speech

    def get_audio_segment(self, audio: str, start_time: str = None, duration: str = None):
        return load_audio(audio, self.sampling_rate, start_time, duration)

    @abstractmethod
    def get_transcribe_timestamps(self, audio: str):
        """
        Get the start and end timestamps of the sections that should be transcribed by this VAD method.

        Parameters
        ----------
        audio: str
            The audio file.

        Returns
        -------
        A list of start and end timestamps, in fractional seconds.
        """

    def transcribe(self, audio: str, whisperCallable):
        """
        Transcribe the given audio file.

        Parameters
        ----------
        audio: str
            The audio file.

        whisperCallable: Callable[[Union[str, np.ndarray, torch.Tensor]], dict[str, Union[dict, Any]]]
            The callback that is used to invoke Whisper on an audio file/buffer.

        Returns
        -------
        A dictionary with the transcribed text, the adjusted segments, and the most frequently detected language.
        """

        # get speech timestamps from full audio file
        seconds_timestamps = self.get_transcribe_timestamps(audio)

        padded = self.pad_timestamps(seconds_timestamps, self.segment_padding_left, self.segment_padding_right)
        merged = self.merge_timestamps(padded, self.max_silent_period, self.max_merge_size)

        print("Timestamps:")
        pprint(merged)

        if self.transcribe_non_speech:
            max_audio_duration = get_audio_duration(audio)

            # Expand segments to include the gaps between them
            merged = self.expand_gaps(merged, total_duration=max_audio_duration)

            print("Transcribing non-speech:")
            pprint(merged)

        result = {
            'text': "",
            'segments': [],
            'language': ""
        }
        languageCounter = Counter()

        # For each time segment, run whisper
        for segment in merged:
            segment_start = segment['start']
            segment_end = segment['end']
            segment_expand_amount = segment.get('expand_amount', 0)

            segment_duration = segment_end - segment_start

            if segment_duration < MIN_SEGMENT_DURATION:
                continue

            segment_audio = self.get_audio_segment(audio, start_time = str(segment_start), duration = str(segment_duration))

            print("Running whisper from ", format_timestamp(segment_start), " to ", format_timestamp(segment_end), ", duration: ", segment_duration, "expanded: ", segment_expand_amount)
            segment_result = whisperCallable(segment_audio)

            adjusted_segments = self.adjust_timestamp(segment_result["segments"], adjust_seconds=segment_start, max_source_time=segment_duration)

            # Append to output
            result['text'] += segment_result['text']
            result['segments'].extend(adjusted_segments)

            # Increment detected language
            languageCounter[segment_result['language']] += 1

        if len(languageCounter) > 0:
            result['language'] = languageCounter.most_common(1)[0][0]

        return result
            
    def include_gaps(self, segments: Iterator[dict], min_gap_length: float, total_duration: float):
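        """
        Return the segments with the silent gaps between them included as
        additional entries (marked with 'gap': True). Gaps shorter than
        min_gap_length are ignored.
        """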
        result = []
        last_end_time = 0

        for segment in segments:
            segment_start = float(segment['start'])
            segment_end = float(segment['end'])

            if (last_end_time != segment_start):
                delta = segment_start - last_end_time

                if (min_gap_length is None or delta >= min_gap_length):
                    result.append( { 'start': last_end_time, 'end': segment_start, 'gap': True } )
            
            last_end_time = segment_end
            result.append(segment)

        # Also include total duration if specified
        if (total_duration is not None and last_end_time < total_duration):
            delta = total_duration - last_end_time

            if (min_gap_length is None or delta >= min_gap_length):
                result.append( { 'start': last_end_time, 'end': total_duration, 'gap': True } )

        return result

    # Expand the end time of each segment to the start of the next segment
    def expand_gaps(self, segments: List[Dict[str, Any]], total_duration: float):
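        """
        Expand the end time of each segment to the start of the next segment,
        recording the amount added in 'expand_amount'. A gap entry is added
        before the first segment if needed, and the last segment is expanded
        to total_duration if given.
        """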
        result = []

        if len(segments) == 0:
            return result

        # Add gap at the beginning if needed
        if (segments[0]['start'] > 0):
            result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } )

        for i in range(len(segments) - 1):
            current_segment = segments[i]
            next_segment = segments[i + 1]

            delta = next_segment['start'] - current_segment['end']

            # Expand if the gap actually exists
            if (delta >= 0):
                current_segment = current_segment.copy()
                current_segment['expand_amount'] = delta
                current_segment['end'] = next_segment['start']
            
            result.append(current_segment)

        # Also include total duration if specified
        if (total_duration is not None):
            last_segment = result[-1]

            if (last_segment['end'] < total_duration):
                last_segment = last_segment.copy()
                last_segment['end'] = total_duration
                result[-1] = last_segment

        return result

    def adjust_timestamp(self, segments: Iterator[dict], adjust_seconds: float, max_source_time: float = None):
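        """
        Shift the start and end of each segment by adjust_seconds. If
        max_source_time is given, segments that start beyond it (in the
        unadjusted source time) are dropped, and end times are clamped to it.
        """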
        result = []

        for segment in segments:
            segment_start = float(segment['start'])
            segment_end = float(segment['end'])

            # Skip segments that start past the end of the source audio,
            # and clamp segments that extend beyond it
            if (max_source_time is not None):
                if (segment_start > max_source_time):
                    continue
                segment_end = min(max_source_time, segment_end)

            new_segment = segment.copy()

            # Add to start and end
            new_segment['start'] = segment_start + adjust_seconds
            new_segment['end'] = segment_end + adjust_seconds
            result.append(new_segment)
        return result

    def pad_timestamps(self, timestamps: List[Dict[str, Any]], padding_left: float, padding_right: float):
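        """
        Pad each segment by padding_left/padding_right seconds, without
        overlapping the previous or the next segment.
        """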
        if (padding_left == 0 and padding_right == 0):
            return timestamps
        
        result = []
        prev_entry = None

        for i in range(len(timestamps)):
            curr_entry = timestamps[i]
            next_entry = timestamps[i + 1] if i < len(timestamps) - 1 else None

            segment_start = curr_entry['start']
            segment_end = curr_entry['end']

            if padding_left is not None:
                segment_start = max(prev_entry['end'] if prev_entry else 0, segment_start - padding_left)
            if padding_right is not None:
                segment_end = segment_end + padding_right

                # Do not pad past the next segment
                if (next_entry is not None):
                    segment_end = min(next_entry['start'], segment_end)

            new_entry = { 'start': segment_start, 'end': segment_end }
            prev_entry = new_entry
            result.append(new_entry)

        return result

    def merge_timestamps(self, timestamps: List[Dict[str, Any]], max_merge_gap: float, max_merge_size: float):
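        """
        Merge consecutive timestamps that are separated by at most max_merge_gap
        seconds, stopping once the merged entry has grown past max_merge_size seconds.
        """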
        if max_merge_gap is None:
            return timestamps

        result = []
        current_entry = None

        for entry in timestamps:
            if current_entry is None:
                current_entry = entry
                continue

            # Get distance to the previous entry
            distance = entry['start'] - current_entry['end']
            current_entry_size = current_entry['end'] - current_entry['start']

            if distance <= max_merge_gap and (max_merge_size is None or current_entry_size <= max_merge_size):
                # Merge
                current_entry['end'] = entry['end']
            else:
                # Output current entry
                result.append(current_entry)
                current_entry = entry
        
        # Add final entry
        if current_entry is not None:
            result.append(current_entry)

        return result

    def multiply_timestamps(self, timestamps: List[Dict[str, Any]], factor: float):
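        """
        Multiply the start and end of each timestamp by the given factor,
        for instance to convert sample offsets into seconds.
        """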
        result = []

        for entry in timestamps:
            start = entry['start']
            end = entry['end']

            result.append({
                'start': start * factor,
                'end': end * factor
            })
        return result

class VadSileroTranscription(AbstractTranscription):
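    """
    Detects speech sections using the Silero VAD model (loaded via torch.hub),
    processing the audio in chunks of at most VAD_MAX_PROCESSING_CHUNK seconds.
    """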
    def __init__(self, segment_padding_left=SEGMENT_PADDING_LEFT, segment_padding_right=SEGMENT_PADDING_RIGHT, 
                 max_silent_period=MAX_SILENT_PERIOD, max_merge_size=MAX_MERGE_SIZE, transcribe_non_speech: bool = False, 
                 copy = None):
        super().__init__(segment_padding_left=segment_padding_left, segment_padding_right=segment_padding_right, 
                         max_silent_period=max_silent_period, max_merge_size=max_merge_size, transcribe_non_speech=transcribe_non_speech)

        if copy:
            self.model = copy.model
            self.get_speech_timestamps = copy.get_speech_timestamps
        else:
            self.model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad')
            (self.get_speech_timestamps, _, _, _, _) = utils

    def get_transcribe_timestamps(self, audio: str):
        audio_duration = get_audio_duration(audio)
        result = []

        # Divide processing of the audio into chunks
        chunk_start = 0.0

        while (chunk_start < audio_duration):
            chunk_duration = min(audio_duration - chunk_start, VAD_MAX_PROCESSING_CHUNK)

            print("Processing VAD in chunk from {} to {}".format(format_timestamp(chunk_start), format_timestamp(chunk_start + chunk_duration)))
            wav = self.get_audio_segment(audio, str(chunk_start), str(chunk_duration))

            sample_timestamps = self.get_speech_timestamps(wav, self.model, sampling_rate=self.sampling_rate, threshold=SPEECH_THRESHOLD)
            seconds_timestamps = self.multiply_timestamps(sample_timestamps, factor=1 / self.sampling_rate) 
            # The VAD timestamps are relative to the chunk, so clamp at the chunk duration
            adjusted = self.adjust_timestamp(seconds_timestamps, adjust_seconds=chunk_start, max_source_time=chunk_duration)

            result.extend(adjusted)
            chunk_start += chunk_duration

        return result

# A very simple VAD that just marks every N seconds as speech
class VadPeriodicTranscription(AbstractTranscription):
    def __init__(self, periodic_duration: int):
        super().__init__()
        self.periodic_duration = periodic_duration

    def get_transcribe_timestamps(self, audio: str):
        # Get duration in seconds
        audio_duration = get_audio_duration(audio)
        result = []

        # Generate a timestamp every N seconds
        start_timestamp = 0

        while (start_timestamp < audio_duration):
            end_timestamp = min(start_timestamp + self.periodic_duration, audio_duration)
            segment_duration = end_timestamp - start_timestamp

            # Skip segments shorter than the minimum duration
            if (segment_duration >= MIN_SEGMENT_DURATION):
                result.append( {  'start': start_timestamp, 'end': end_timestamp } )

            start_timestamp = end_timestamp

        return result

def get_audio_duration(file: str):
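    """Return the duration of the given audio file in seconds, using ffprobe."""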
    return float(ffmpeg.probe(file)["format"]["duration"])

def load_audio(file: str, sample_rate: int = 16000, 
               start_time: str = None, duration: str = None):
    """
    Open an audio file and read as mono waveform, resampling as necessary

    Parameters
    ----------
    file: str
        The audio file to open

    sample_rate: int
        The sample rate to resample the audio if necessary

    start_time: str
        The start time, using the standard FFMPEG time duration syntax, or None to disable.
    
    duration: str
        The duration, using the standard FFMPEG time duration syntax, or None to disable.

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """
    try:
        inputArgs = {'threads': 0}

        if (start_time is not None):
            inputArgs['ss'] = start_time
        if (duration is not None):
            inputArgs['t'] = duration

        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
        # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
        out, _ = (
            ffmpeg.input(file, **inputArgs)
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sample_rate)
            .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
        )
    except ffmpeg.Error as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}")

    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
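
# Example usage (a minimal sketch, not part of the original module): this assumes
# the `openai-whisper` package is installed and that a file named "audio.mp3"
# exists; both the model size and the file name are illustrative only.
if __name__ == "__main__":
    import whisper

    model = whisper.load_model("base")
    vad = VadSileroTranscription()

    # The callable receives each detected segment as a float32 NumPy array and
    # must return a Whisper-style result dict with 'text', 'segments' and 'language'.
    result = vad.transcribe("audio.mp3", lambda segment_audio: model.transcribe(segment_audio))
    print(result['text'])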