import librosa
import matplotlib.pyplot as plt
import numpy as np

import vad.voice_activity_detect as vad


def compute_bic(mfcc_v, delta):
    """
    Compute delta-BIC over candidate change points inside a detection window.

    Candidate split indices are scanned every ``delta`` frames (skipping the
    first ``flat_start`` frames, where segments are too short for a stable
    covariance estimate). Each candidate is scored by the BIC gain of
    modelling the window as two segments versus one, using diagonal
    covariances only.

    :param mfcc_v: MFCC array for the detection window, shape (m, n)
        with m coefficients and n frames.
    :param delta: step (in frames) between candidate change points.
    :return: frame index of the best change point (best split index minus 1)
        when its BIC is positive, otherwise -1 (no change detected).
    """
    m, n = mfcc_v.shape

    # Diagonal covariance of the whole window; clamp away zeros so the
    # determinant (product of variances) never underflows log().
    sigma0 = np.cov(mfcc_v).diagonal()
    eps = np.spacing(1)
    real_min = np.finfo(np.double).tiny
    det0 = max(np.prod(np.maximum(sigma0, eps)), real_min)

    flat_start = 5

    range_loop = range(flat_start, n, delta)
    if len(range_loop) == 0:
        # Window shorter than flat_start: nothing to test. The original code
        # crashed here (max() on an empty array); report "no change" instead.
        return -1

    x = np.zeros(len(range_loop))
    for i, index in enumerate(range_loop):
        part1 = mfcc_v[:, 0:index]
        part2 = mfcc_v[:, index:n]

        sigma1 = np.cov(part1).diagonal()
        sigma2 = np.cov(part2).diagonal()

        det1 = max(np.prod(np.maximum(sigma1, eps)), real_min)
        det2 = max(np.prod(np.maximum(sigma2, eps)), real_min)

        # delta-BIC: likelihood gain of the two-segment model minus the
        # model-complexity penalty 0.5 * (m + m(m+1)/2) * log(n).
        bic = 0.5*(n*np.log(det0)-index*np.log(det1)-(n-index)*np.log(det2))-0.5*(m+0.5*m*(m+1))*np.log(n)
        x[i] = bic

    max_bic = x.max()
    max_index = x.argmax()
    if max_bic > 0:
        return range_loop[max_index]-1
    else:
        return -1


def speech_segmentation(mfccs):
    """
    Find acoustic change points by sliding a growing window over the MFCCs.

    The detection window starts at 200 frames. If no change point is found
    inside it, the window grows by 200 frames and is re-tested. When
    compute_bic reports a change point, its absolute frame index is recorded
    and the window restarts 200 frames past the detected point.

    :param mfccs: MFCC matrix, shape (n_coeffs, n_frames).
    :return: numpy array of change-point frame indices (may be empty).
    """
    w_start = 0
    w_end = 200
    w_grow = 200
    delta = 25

    n = mfccs.shape[1]

    # Slide/grow the window over the frame axis, collecting BIC change points.
    store_cp = []
    while w_end < n:
        feature_seg = mfccs[:, w_start:w_end]
        det_bic = compute_bic(feature_seg, delta)
        if det_bic > 0:
            # Change point, converted from window-relative to absolute frames.
            store_cp.append(w_start + det_bic)
            # Restart detection shortly after the change point.
            w_start = w_start + det_bic + 200
            w_end = w_start + w_grow
        else:
            # Nothing found: enlarge the window and try again.
            w_end = w_end + w_grow

    return np.array(store_cp)


def multi_segmentation(file, sr, frame_size, frame_shift, plot_seg=False):
    """
    Segment an audio file at acoustic change points, discarding silence.

    Pipeline: load audio -> extract MFCCs -> BIC change-point detection ->
    convert change points to sample positions -> drop segments that VAD
    marks as silent -> return the surviving boundaries in seconds.

    :param file: path of the audio file to load.
    :param sr: target sampling rate passed to librosa.load.
    :param frame_size: FFT window length in samples (n_fft).
    :param frame_shift: hop length in samples.
    :param plot_seg: if True, plot the waveform with segment boundaries.
    :return: numpy array of segment boundaries in seconds.
    """
    # Waveform resampled to the requested rate.
    y, sr = librosa.load(file, sr=sr)

    # 12-coefficient MFCC matrix, one column per frame.
    mfccs = librosa.feature.mfcc(y, sr, n_mfcc=12, hop_length=frame_shift, n_fft=frame_size)
    # Change points in frame indices; MFCCs normalised by their global max.
    seg_point = speech_segmentation(mfccs / mfccs.max())

    seg_point = seg_point * frame_shift        # frame index -> sample position
    seg_point = np.insert(seg_point, 0, 0)     # prepend the signal start
    seg_point = np.append(seg_point, len(y))   # append the signal end

    # Keep a boundary only if the segment ending at it contains voice
    # activity; vad.vad returns empty start/end lists for silent audio.
    output_segpoint = []
    for i in range(len(seg_point) - 1):
        segment = y[seg_point[i]:seg_point[i + 1]]

        x1, x2 = vad.vad(segment, sr=sr, frame_size=frame_size, frame_shift=frame_shift)
        if len(x1) == 0 or len(x2) == 0:
            continue
        output_segpoint.append(seg_point[i + 1])

    if plot_seg:
        plt.figure('speech segmentation plot')
        plt.plot(np.arange(0, len(y)) / float(sr), y, "b-")

        # One dashed red line per retained boundary. (The original drew each
        # line twice, cyan then red at identical positions — redundant.)
        for point in output_segpoint:
            plt.vlines(point / float(sr), -1, 1, colors="r", linestyles="dashed")
        plt.xlabel("Time/s")
        plt.ylabel("Speech Amp")
        plt.grid(True)
        plt.show()

    return np.asarray(output_segpoint) / float(sr)