from typing import Tuple
import pyaudio, wave
from contextlib import contextmanager
from ctypes import *
from tqdm import tqdm
import sys, os
sys.path.append(os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2]))
import time, math
import numpy as np
# from matplotlib import pyplot as plt
from dialog.threading_utils import ControllableThread
from snowboydecoder import RingBuffer

def py_error_handler(filename, line, function, err, fmt):
    """No-op ALSA error handler: silently discard ALSA's stderr diagnostics."""
    pass

# C signature: void handler(const char *file, int line, const char *function,
#                           int err, const char *fmt, ...)
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)

# Module-level reference keeps the callback alive while ALSA holds a pointer to it.
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)

@contextmanager
def no_alsa_error():
    """Suppress ALSA's noisy stderr output for the duration of the body.

    On systems without libasound the body still runs, just without
    suppression.  The handler is always restored — even when the body
    raises — and exceptions from the body propagate normally.

    NOTE: the original version wrapped the ``yield`` itself in a bare
    ``except:``; an exception raised inside the ``with`` body was then
    caught at the first yield and the generator yielded a second time,
    which contextlib converts into ``RuntimeError("generator didn't stop
    after throw()")``.  Installing the handler and yielding are now
    separated so body exceptions pass through untouched.
    """
    try:
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler)
    except OSError:
        # libasound is missing (non-ALSA platform): run unsuppressed.
        asound = None
    try:
        yield
    finally:
        if asound is not None:
            asound.snd_lib_error_set_handler(None)

# Record from the default input device to a WAV file.
def record_audio(wave_out_path='input.wav', record_second=3) -> None:
    """Record ``record_second`` seconds of 16 kHz mono int16 audio.

    :param wave_out_path: destination WAV file path
    :param record_second: duration of the recording in seconds
    """
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    with no_alsa_error():
        p = pyaudio.PyAudio()
    stream = None
    wf = None
    try:
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        wf = wave.open(wave_out_path, 'wb')
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        print("* recording")
        for _ in tqdm(range(int(RATE / CHUNK * record_second))):
            wf.writeframes(stream.read(CHUNK))
        print("* done recording")
    finally:
        # Always release the stream, PyAudio and the file — the original
        # leaked all three if stream.read() raised (e.g. input overflow).
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()
        if wf is not None:
            wf.close()

# Blocking playback of a WAV file.
def play_audio(wave_path) -> None:
    """Play the WAV file at ``wave_path`` synchronously through the default output."""
    CHUNK = 1024
    wf = wave.open(wave_path, 'rb')
    # instantiate PyAudio (1)
    with no_alsa_error():
        p = pyaudio.PyAudio()
    try:
        # open stream (2)
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # Read the whole file chunk by chunk.
        # BUGFIX: the original read one chunk before the loop and then
        # immediately overwrote it, silently dropping the first CHUNK frames
        # of every file (and appending a trailing empty bytes object).
        datas = []
        data = wf.readframes(CHUNK)
        while len(data) > 0:
            datas.append(data)
            data = wf.readframes(CHUNK)
        # play stream (3)
        for d in tqdm(datas):
            stream.write(d)
        # stop stream (4)
        stream.stop_stream()
        stream.close()
    finally:
        # close PyAudio and the file (5)
        p.terminate()
        wf.close()

# Non-blocking playback via PyAudio's callback API.
def play_audio_callback(wave_path):
    '''Play a WAV file while other work can run concurrently.

    PyAudio pulls audio through ``callback`` on its own thread; this
    function only polls until the stream reports completion.
    '''
    wf = wave.open(wave_path, 'rb')
    # instantiate PyAudio (1)
    with no_alsa_error():
        p = pyaudio.PyAudio()

    bytes_per_frame = wf.getsampwidth() * wf.getnchannels()

    def callback(in_data, frame_count, time_info, status):
        data = wf.readframes(frame_count)
        # Return paComplete explicitly once the file is exhausted instead of
        # relying solely on PyAudio's short-buffer end-of-stream heuristic.
        if len(data) < frame_count * bytes_per_frame:
            return (data, pyaudio.paComplete)
        return (data, pyaudio.paContinue)

    try:
        # open stream (2)
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True,
                        stream_callback=callback)
        stream.start_stream()
        # Wait for the callback thread to drain the file.
        while stream.is_active():
            time.sleep(0.1)
        # stop stream (4)
        stream.stop_stream()
        stream.close()
    finally:
        # close PyAudio and the file (5) — the original never closed wf.
        p.terminate()
        wf.close()

# Enumerate audio devices.
def get_audio_devices() -> list:
    """Return the info dict of every audio device PyAudio can see.

    Also prints each device name as a side effect, matching the original
    behavior.
    """
    with no_alsa_error():
        p = pyaudio.PyAudio()
    devices = []
    try:
        for i in range(p.get_device_count()):
            # Query each device once instead of twice.
            info = p.get_device_info_by_index(i)
            print(info.get('name'))
            devices.append(info)
    finally:
        p.terminate()
    return devices

# # 绘图
# def show_wave(data, time_length, title=None) -> None:
#     data_length = len(data)
#     time_stamps = np.linspace(0, time_length, data_length)
#     # 不同窗口显示不同的图
#     plt.figure()
#     plt.plot(time_stamps, data)
#     if title:
#         plt.title(title)
#     plt.xlabel("time step / s")
#     plt.ylabel("amplitude")
#     plt.grid(True)
#     plt.show()

# Voice activity detection thread.
class VAD(ControllableThread):
    """Energy / zero-crossing-rate based voice activity detector.

    Audio is pulled from the microphone into a ring buffer by a PyAudio
    callback; ``is_speaking`` consumes the buffer and flips ``is_active``
    off after ``SILENCE_TIMEOUT`` seconds of silence.  Thresholds are
    either calibrated interactively (``initialize=True``) or taken from
    fixed defaults.
    """
    SAMPLE_RATE = 16000
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    FRAMES_PER_BUFFER = 2048
    SILENCE_TIMEOUT = 4      # seconds of silence before going inactive
    MAX_EN = 2000            # normalization divisor for raw int16 samples

    def __init__(self, initialize=False, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Large enough for SILENCE_TIMEOUT seconds of raw samples.
        self._ring_buffer = RingBuffer(self.SAMPLE_RATE * self.CHANNELS * self.SILENCE_TIMEOUT)
        # BUGFIX: the default dict used the misspelled key "zrc" while
        # initialize() returns "zcr", so running with calibrated thresholds
        # raised KeyError in is_speaking(); the key is now "zcr" everywhere.
        self._thresholds = self.initialize() if initialize else {"stam": [50, 20], "zcr": [60, 40]}
        self.is_active = False

    # Interactive threshold calibration.
    def initialize(self, time_length=4, epochs=5) -> dict[str, list]:
        """Calibrate STAM/ZCR thresholds from quiet and speaking samples.

        :param time_length: seconds of audio collected per epoch
        :param epochs: number of quiet/speaking collection rounds
        :return: ``{"stam": [high, low], "zcr": [high, low]}`` where the
            high value gates strong activity and the low value weak activity
        """
        def get_thresholds(state="quiet") -> Tuple[np.float64, np.float64]:
            stam_list = []
            zcr_list = []
            for i in range(epochs):
                print(f"Epoch[{i+1}/{epochs}]------ keep {state}({time_length}s) ------")
                elapsed = 0.
                while True:
                    data = self._ring_buffer.get()
                    if len(data) == 0:
                        # Buffer empty: let the audio callback catch up.
                        time.sleep(0.03)
                        continue
                    np_data = np.frombuffer(data, dtype='<i2')
                    np_data = np_data * 1.0 / self.MAX_EN
                    stam_value, _ = self.stam(np_data, 256, 128)
                    zcr_value, _ = self.zcr(np_data, 256, 128)
                    stam_list.append(stam_value)
                    zcr_list.append(zcr_value)
                    if elapsed >= time_length:
                        break
                    elapsed = elapsed + len(np_data) / self.SAMPLE_RATE
            return np.array(stam_list).mean(), np.array(zcr_list).mean()

        def audio_callback(in_data, frame_count, time_info, status):
            # Capture-only stream: stash raw bytes, return silence.
            self._ring_buffer.extend(in_data)
            play_data = chr(0) * len(in_data)
            return play_data, pyaudio.paContinue

        with no_alsa_error():
            p = pyaudio.PyAudio()
        stream = p.open(
                    rate=self.SAMPLE_RATE,
                    format=self.FORMAT,
                    channels=self.CHANNELS,
                    input=True,
                    frames_per_buffer=self.FRAMES_PER_BUFFER,
                    stream_callback=audio_callback)

        thresholds = {"stam": [], "zcr": []}
        stam_quiet, zcr_quiet = get_thresholds("quiet")
        stam_speak, zcr_speak = get_thresholds("speak")
        # High gate: 80% of the speaking level; low gate: 120% of ambient.
        thresholds["stam"].extend([0.8 * stam_speak, 1.2 * stam_quiet])
        thresholds["zcr"].extend([0.8 * zcr_speak, 1.2 * zcr_quiet])

        stream.stop_stream()
        stream.close()
        p.terminate()

        return thresholds

    # Short-time zero-crossing rate.
    def zcr(self, wave_data, window_width, overlap, delta=None) -> Tuple[np.float64, np.ndarray]:
        '''Return the total ZCR over a clip and the per-frame ZCR series.

        :param wave_data: 1-D normalized sample array
        :param window_width: samples per analysis frame
        :param overlap: samples shared by consecutive frames
        :param delta: optional fixed DC offset; defaults to each frame's mean
        '''
        step = window_width - overlap
        frame_num = math.ceil(len(wave_data) / step)
        zcrs = np.zeros((frame_num, ))
        for i in range(frame_num):
            # BUGFIX: the original sliced `i*step : i*step + frame_num`,
            # tying the frame length to the frame COUNT instead of the
            # window width.
            frame = wave_data[i * step: i * step + window_width]
            # Zero-justify so crossings are counted about zero.
            # BUGFIX: `if delta` treated delta=0 as "not provided".
            frame = (frame - delta) if delta is not None else (frame - np.mean(frame))
            zcrs[i] = sum(frame[0:-1] * frame[1::] <= 0)
        return sum(zcrs), zcrs

    # Short-time average magnitude (mean squared energy per frame).
    def stam(self, wave_data, window_width, overlap) -> Tuple[np.float64, np.ndarray]:
        '''Return the total STAM over a clip and the per-frame STAM series.

        :param wave_data: 1-D normalized sample array
        :param window_width: samples per analysis frame
        :param overlap: samples shared by consecutive frames
        '''
        step = window_width - overlap
        frame_num = math.ceil(len(wave_data) / step)
        stams = np.zeros((frame_num, ))
        for i in range(frame_num):
            # BUGFIX: frame length was `frame_num` instead of window_width.
            frame = wave_data[i * step: i * step + window_width]
            stams[i] = np.square(frame).mean()
        return sum(stams), stams

    # Track speech activity until a long silence.
    def is_speaking(self):
        """Open the mic and monitor activity; blocks until SILENCE_TIMEOUT
        seconds of silence, then clears ``is_active`` and suspends the thread."""
        self.is_active = True
        silence_time = 0.
        count = 0

        def audio_callback(in_data, frame_count, time_info, status):
            # Capture-only stream: stash raw bytes, return silence.
            self._ring_buffer.extend(in_data)
            play_data = chr(0) * len(in_data)
            return play_data, pyaudio.paContinue

        with no_alsa_error():
            p = pyaudio.PyAudio()
        stream = p.open(
                    rate=self.SAMPLE_RATE,
                    format=self.FORMAT,
                    channels=self.CHANNELS,
                    input=True,
                    frames_per_buffer=self.FRAMES_PER_BUFFER,
                    stream_callback=audio_callback
                    )

        while True:
            data = self._ring_buffer.get()
            if len(data) == 0:
                time.sleep(0.03)
                continue
            np_data = np.frombuffer(data, dtype='<i2')
            np_data = np_data * 1.0 / self.MAX_EN
            stam_value, _ = self.stam(np_data, 256, 128)
            zcr_value, _ = self.zcr(np_data, 256, 128)
            if len(np_data) != self.FRAMES_PER_BUFFER:
                # Scale totals so reads of any length compare to one buffer.
                ratio = len(np_data) / self.FRAMES_PER_BUFFER
                stam_value, zcr_value = stam_value / ratio, zcr_value / ratio

            # Active if energy crosses the high gate, or ZCR crosses its
            # gate while energy is above the low gate.
            # BUGFIX: the ZCR lookup used the misspelled key "zrc".
            if stam_value > self._thresholds["stam"][0] or \
                    (zcr_value > self._thresholds["zcr"][0] and stam_value > self._thresholds["stam"][1]):
                count = count + 1
            else:
                silence_time = silence_time + len(np_data) / self.SAMPLE_RATE
            if count >= 3:
                # Three active buffers reset the silence clock.
                silence_time = 0
                count = 0
            # Prolonged silence: go inactive and stop monitoring.
            if silence_time >= self.SILENCE_TIMEOUT:
                self.is_active = False
                self.suspend()
                break

        stream.stop_stream()
        stream.close()
        p.terminate()

    # Thread main loop.
    def run(self):
        while self.is_running:
            # Drop any stale audio, then wait until resumed.
            self._ring_buffer.get()
            self._flag.wait()
            if not self.is_running:
                break
            self.is_speaking()

if __name__ == '__main__':
    # record_audio()
    vad = VAD()
    vad.start()
    vad.resume()
    try:
        # The original loop had no exit path, so vad.stop() was unreachable
        # and the loop busy-spun printing while the VAD was active.
        while True:
            print(f"is speaking: {vad.is_active}")
            if not vad.is_active:
                print("no voice activity detected, finished.")
                print("vad will resume 3s later...")
                time.sleep(3)
                vad.resume()
            else:
                # Throttle status printing while activity is being tracked.
                time.sleep(0.5)
    except KeyboardInterrupt:
        vad.stop()
        print("Done")
    # devices = get_audio_devices()
    # print(f"{devices}\nDone")