import collections
import contextlib
import sys
import wave
import os
# from shotembedding import pubfunc
import pubfunc
import webrtcvad
 
# Split audio files into voiced chunks using webrtcvad voice-activity detection.
# Length of each VAD analysis frame in milliseconds.
# NOTE(review): webrtcvad only accepts 10/20/30 ms frames — keep it one of those.
FRAME_DURATION_MS = 20
class vadDetect():
    """VAD-based splitting of mono 16-bit 32 kHz WAV files.

    Uses webrtcvad to find voiced regions in a recording and writes each
    region (or an accumulation of regions, see splitWave) into ``self.path``
    as ``<start_ms>_<end_ms>.wav`` chunk files.
    """

    def __init__(self, path):
        # Directory into which the generated chunk files are written.
        self.path = path

    def read_wave(self, path):
        """Read a WAV file and return ``(pcm_bytes, sample_rate)``.

        The file must be mono, 16-bit PCM at 32 kHz; the asserts guard the
        assumptions the timestamp arithmetic in frame_generator relies on.
        """
        with contextlib.closing(wave.open(path, 'rb')) as wf:
            assert wf.getnchannels() == 1
            assert wf.getsampwidth() == 2
            sample_rate = wf.getframerate()
            # The sample rate must be 32000, otherwise the computed
            # timestamps drift from real time.
            assert sample_rate == 32000
            pcm_data = wf.readframes(wf.getnframes())
            return pcm_data, sample_rate

    def write_wave(self, path, audio, sample_rate):
        """Write raw 16-bit mono PCM bytes *audio* to *path* as a WAV file."""
        with contextlib.closing(wave.open(path, 'wb')) as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(sample_rate)
            wf.writeframes(audio)

    class Frame(object):
        """A fixed-duration slice of PCM audio plus its start timestamp."""

        # NOTE(review): the first parameter shadows the builtin ``bytes``;
        # kept as-is for backward compatibility with keyword callers.
        def __init__(self, bytes, timestamp, duration):
            self.bytes = bytes          # raw PCM payload of this frame
            self.timestamp = timestamp  # start time, in seconds
            self.duration = duration    # frame length, in seconds

    def frame_generator(self, frame_duration_ms, audio, sample_rate):
        """Yield successive Frame objects of *frame_duration_ms* from *audio*.

        *audio* is 16-bit mono PCM, hence the factor of 2 (bytes per sample).
        A trailing partial frame is dropped.
        """
        n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)  # bytes/frame
        offset = 0
        timestamp = 0.0
        duration = (float(n) / sample_rate) / 2.0                # seconds/frame
        while offset + n < len(audio):
            yield self.Frame(audio[offset:offset + n], timestamp, duration)
            timestamp += duration
            offset += n

    def vad_collector(self, sample_rate, frame_duration_ms,
                    padding_duration_ms, vad, frames):
        """Group voiced frames into segments.

        Yields ``(pcm_bytes, start_seconds, end_seconds)`` tuples, one per
        voiced region. A region opens once more than 90% of a sliding window
        of *padding_duration_ms* worth of frames is voiced, and closes once
        more than 90% of the window is unvoiced.
        """
        try:
            num_padding_frames = int(padding_duration_ms / frame_duration_ms)
            ring_buffer = collections.deque(maxlen=num_padding_frames)
            triggered = False
            voiced_frames = []
            start = 0
            end = 0
            for frame in frames:
                end = frame.timestamp + frame.duration
                if not triggered:
                    ring_buffer.append(frame)
                    num_voiced = len([f for f in ring_buffer
                                    if vad.is_speech(f.bytes, sample_rate)])
                    if num_voiced > 0.9 * ring_buffer.maxlen:
                        # Enough speech in the window: open a segment at the
                        # oldest buffered frame and keep everything buffered.
                        start = ring_buffer[0].timestamp
                        triggered = True
                        voiced_frames.extend(ring_buffer)
                        ring_buffer.clear()
                else:
                    voiced_frames.append(frame)
                    ring_buffer.append(frame)
                    num_unvoiced = len([f for f in ring_buffer
                                        if not vad.is_speech(f.bytes, sample_rate)])
                    if num_unvoiced > 0.9 * ring_buffer.maxlen:
                        # Enough silence in the window: close the segment at
                        # the current frame's end timestamp.
                        triggered = False
                        yield b''.join(f.bytes for f in voiced_frames), start, end
                        start = end
                        ring_buffer.clear()
                        voiced_frames = []
            if voiced_frames:
                # Flush the trailing, still-open segment.
                yield b''.join(f.bytes for f in voiced_frames), start, end
        except Exception as e:
            # BUG FIX: the original did ``return False`` — inside a generator
            # that value is unreachable by callers; keep the best-effort
            # "stop yielding" behavior but report the error and return cleanly.
            print(str(e))
            return

    # Split the audio file into chunks of at least *seconds* of voiced audio.
    def splitWave(self, wavefile, seconds):
        """Write voiced audio from *wavefile* into chunk files under self.path.

        Consecutive voiced segments (separated by at least 300 ms of silence)
        are concatenated until their total voiced duration exceeds *seconds*;
        each chunk is written as ``<chunk_start_ms>_<chunk_end_ms>.wav``.
        Always returns True.
        """
        audio, sample_rate = self.read_wave(wavefile)
        vad = webrtcvad.Vad(2)
        frames = list(self.frame_generator(FRAME_DURATION_MS, audio, sample_rate))

        sframes = b''
        baudio = None  # start time of the chunk currently being accumulated
        laudio = 0     # voiced duration accumulated so far (seconds)
        end = 0
        # A 300 ms silent gap is enough to cut between segments.
        for segment, start, end in self.vad_collector(sample_rate, FRAME_DURATION_MS, 300, vad, frames):
            sframes += segment
            if baudio is None:
                baudio = start
            laudio += end - start
            if laudio > seconds:
                chunk_path = f'{self.path}/{int(baudio*1000):0>10}_{int(end*1000):0>10}.wav'
                self.write_wave(chunk_path, sframes, sample_rate)
                baudio = None
                laudio = 0
                sframes = b''
        # BUG FIX: flush the trailing partial chunk even when it starts at
        # t == 0.0 — the original ``if baudio > 0:`` silently dropped it.
        if sframes:
            chunk_path = f'{self.path}/{int(baudio*1000):0>10}_{int(end*1000):0>10}.wav'
            self.write_wave(chunk_path, sframes, sample_rate)

        return True

    # Cut the file directly, one output file per voiced segment,
    # without accumulating a minimum duration.
    def vad(self, wavefile):
        """Write one WAV chunk per voiced segment of *wavefile*. Returns True."""
        audio, sample_rate = self.read_wave(wavefile)
        vad = webrtcvad.Vad(2)
        frames = list(self.frame_generator(FRAME_DURATION_MS, audio, sample_rate))

        # A 400 ms silent gap separates segments here.
        for segment, start, end in self.vad_collector(sample_rate, FRAME_DURATION_MS, 400, vad, frames):
            chunk_path = f'{self.path}/{int(start * 1000):0>10}_{int(end * 1000):0>10}.wav'
            self.write_wave(chunk_path, segment, sample_rate)
        return True
    
 
if __name__ == '__main__':
    # Manual smoke run: segment one sample recording into /kk.
    detector = vadDetect('/kk')
    detector.vad('/home/upload/subtask/2FB668BD0EF92D63/a1.wav')