import functools
import os
import time
from datetime import datetime

import numpy as np
import soundfile as sf
from librosa import feature
from scipy.fft import ifft
from scipy.linalg import pinv
from scipy.signal import welch, csd, convolve
from sklearn.cluster import KMeans


# Timing decorator
def timer(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    :param func: callable to wrap.
    :return: wrapper with the same call signature and return value as *func*.
    """
    # functools.wraps keeps func's __name__/__doc__ on the wrapper, so stacked
    # decorators and introspection report the real function, not 'wrapper'.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print("Elapse time of {:}:  {:.2f} s.".format(func.__name__, end_time - start_time))
        return result

    return wrapper


class SFR_Base():
    """Sound field reconstruction (SFR) by the pressure-matching method."""

    def __init__(self):
        '''
        Sound field reconstruction via pressure matching.

        1. From the "Rand_in" excitation and the corresponding "response"
           recordings, estimate the transfer functions between the 8 speakers
           and the 8 microphones, plus their regularised inverses.
        2. With the inverse transfer functions and a target 8-channel sound
           field, compute what each speaker must play so that the microphones
           reproduce the target field.

        Data paths and tuning values are supplied to the individual methods:
        rand_in_path:  wav played by a single speaker during identification.
        response_path: wav holding the 64 signals (8 speakers firing in turn
                       x 8 microphones); same length as the rand_in data.
        target_path:   wav with the target 8-channel sound field.
        filter_len:    preset filter length.
        beta:          regularisation factor.
        '''
        # Licence check: raises once the trial period is over.
        self.check_EXPIRE_DAYS()

        # Default working directory used by the B&K acquisition software.
        self.temp_path = os.path.join(os.path.expanduser('~'), 'Documents', 'Bruel and Kjaer Applications',
                                      'LANXI Streaming', 'Temp')
        # Create the directory (and any missing parents). The original
        # `os.mkdir` + bare `except: pass` silently did nothing when a parent
        # directory was absent.
        os.makedirs(self.temp_path, exist_ok=True)

    def read_wav_data(self, rand_in_path='',
                      response_path='',
                      target_path='',
                      filter_len=4800 * 3):  # filter length
        """Best-effort loading of the excitation, response and target wavs.

        Any file that cannot be read is skipped with a console notice and its
        return value stays an empty list (behaviour kept from the original
        design; the handlers now catch ``Exception`` instead of a bare
        ``except`` that would also swallow ``KeyboardInterrupt``).

        :param rand_in_path: path of the single-speaker excitation wav.
        :param response_path: path of the 64-channel response wav.
        :param target_path: path of the 8-channel target wav.
        :param filter_len: filter length, stored on the instance.
        :return: (rand_in, response, target); [] where a read failed.
        """
        self.filter_len = filter_len

        rand_in, response, target = [], [], []
        try:
            rand_in, self.fs = sf.read(rand_in_path)
            self.sig_len = len(rand_in)
        except Exception:
            print('read rand_in sound passed!')
        try:
            # channels-first, then reshape to (speaker, mic, samples)
            response = (sf.read(response_path)[0].T).reshape(8, 8, -1)
            # strip leading/trailing background noise
            response, self.pre_noise_len = self.Prepocess(response)
        except Exception:
            print('read response sound passed!')
        try:
            target = 0.5 * sf.read(target_path)[0]
        except Exception:
            print('read target sound passed!')
        return rand_in, response, target

    @timer
    def Prepocess(self, response):
        """Find the signal onset in every (speaker, mic) recording and zero
        everything outside a fixed 960000-sample window starting there.

        The onset is located by 2-means clustering of the short-time RMS
        envelope: the first label change marks the noise-to-signal transition.

        :param response: array shaped (8, 8, n_samples).
        :return: (windowed response, (8, 8) onset sample index per channel).
        """
        start_point_list = np.zeros((8, 8))
        new_response = np.zeros_like(response)
        for i in range(8):
            for j in range(8):
                data1 = response[i, j, :]
                # RMS envelope with a 128-sample hop
                rms_feature = feature.rms(y=data1, hop_length=128)
                cluster = KMeans(n_clusters=2, n_init=1, random_state=0)
                # NOTE(review): librosa returns rms with shape (1, n_frames),
                # so [:400] on axis 0 is a no-op; if only the first 400 frames
                # were intended this should be rms_feature[0, :400] -- confirm.
                cluster.fit(rms_feature[:400].reshape(-1, 1))
                y_pred = cluster.labels_
                index = np.argwhere(np.abs(np.diff(y_pred)) == 1)
                start_point = index[0, 0] * 128  # frame index -> sample index
                stop_point = start_point + 960000
                win = np.zeros(len(data1))
                win[start_point:stop_point] = 1
                new_response[i, j, :] = data1.ravel() * win
                start_point_list[i, j] = start_point
        return np.array(new_response), start_point_list

    @timer
    def Cal_Impulse(self, input_data, response_data, fs, filter_len, pre_noise_len, keep_filter=0.5):
        """Estimate the speaker-to-microphone impulse responses.

        H1 estimator: H = Gxy / Gxx from Welch auto-/cross-spectra; an inverse
        FFT via conjugate-symmetric extension then yields time-domain impulse
        responses, which are gated to suppress the leading background-noise
        segment and the decayed tail.

        :param input_data: excitation signal(s) driving the speakers.
        :param response_data: iterable of response blocks, one per mic.
        :param fs: sampling rate in Hz.
        :param filter_len: FFT / filter length in samples.
        :param pre_noise_len: (8, 8) leading-noise lengths from Prepocess.
        :param keep_filter: fraction of the filter kept after the onset.
        :return: (H_list, h_list) -- frequency-domain list (n_fft/2+1, L, M)
                 and gated time-domain responses (M, L, n_fft).
        """
        overlap = filter_len // 2
        GXX = welch(input_data, fs, nperseg=filter_len, noverlap=overlap, axis=-1, scaling='spectrum')[1]
        GXY = [csd(input_data, out_data, fs, nperseg=filter_len, noverlap=overlap, scaling='spectrum')[1] for
               out_data in response_data]  # L*M*freq
        H = [gxy / gxx for gxx, gxy in zip(GXX, GXY)]  # L*M*freq

        H_list = list(np.array(H).transpose([2, 0, 1]))  # n_fft/2+1 * L * M
        h_list = self.symmetry_IFFT(np.array(H_list), n=filter_len).transpose([2, 1, 0])  # M * L * n_fft

        # Gate each time-domain response: zero the pre-noise head and
        # everything beyond keep_filter * filter_len after it.
        for i in range(8):
            for j in range(8):
                # NOTE(review): pre_noise_len is in samples, so the /fs then
                # *filter_len scaling yields an unusual unit -- confirm intent.
                cut_front = int(pre_noise_len[i, j] / fs * filter_len)
                h_list[i, j, :cut_front] = 0
                h_list[i, j, (cut_front + int(keep_filter * filter_len)):] = 0

        return H_list, h_list

    @timer
    def Cal_inversion(self, H_list, fs, pre_noise_len, beta=8e-5, keep_filter=0.4):
        """Compute the regularised inverse filters per frequency bin,
        C = (H^H H + beta*I)^-1 H^H, transform them to the time domain and
        gate them channel by channel.

        :param H_list: frequency-domain transfer functions (n_fft/2+1, L, M).
        :param fs: sampling rate in Hz.
        :param pre_noise_len: (8, 8) leading-noise lengths from Prepocess.
        :param beta: Tikhonov regularisation factor.
        :param keep_filter: fraction of the filter kept when gating.
        :return: (c_list, C_list) -- time-domain inverse filters (M, L, n_fft)
                 and the per-bin frequency-domain inverse matrices.
        """
        filter_len = (len(H_list) - 1) * 2
        I = np.eye(8)
        C_list = []
        for i in range(len(H_list)):
            H = H_list[i]  # L*M
            H_T = H.conj().T  # M*L
            C_list.append(pinv(H_T @ H + beta * I) @ H_T)  # M*L
        c_list = self.symmetry_IFFT(np.array(C_list), n=filter_len).transpose([1, 2, 0])  # M * L * n_fft

        for i in range(8):
            for j in range(8):
                cut_front = int(pre_noise_len[i, j] / fs * filter_len)
                # Bug fix: the original assigned to c_list[:, :, ...] inside
                # this loop, so every channel ended up gated with the union of
                # all channels' cut regions. Gate each (i, j) filter with its
                # own cut, mirroring the per-channel gating in Cal_Impulse.
                c_list[i, j, -cut_front:] = 0  # inverse-filter trimming
                c_list[i, j, :-(cut_front + int(keep_filter * filter_len))] = 0
        return c_list, C_list

    @timer
    def Inv_filtering(self, target, c_list):
        """Filter the target field through the inverse filters to obtain the
        speaker drive signals.

        The target (n_samples, M) is circularly padded by roughly one filter
        length on each side; each mic channel is convolved with the matching
        inverse filter and the contributions are summed per speaker.

        :param target: target sound field, shape (n_samples, M).
        :param c_list: time-domain inverse filters, shape (M, L, n_filter).
        :return: (speaker signals of shape (L, n_samples), their max value).
        """
        filter_len = c_list.shape[2]
        # circular padding so the 'valid' convolution covers the full signal
        target2 = np.vstack((target[-filter_len + 1:, :], target, target[:filter_len:, :]))

        x_list = []
        L, M = 8, 8
        for l in range(L):  # speaker
            temp = []
            for m in range(M):  # mic
                # target: M * n_sample        c_list: M * L * n_filter
                temp.append(convolve(target2[:, m], c_list[m, l, :], mode='valid', method='auto'))
            x_list.append(np.array(temp).sum(axis=0)[filter_len:])
        amp_max = np.max(np.array(x_list))
        return np.array(x_list), amp_max

    @timer
    def Cal_sim_P(self, x, h_list):
        """Simulate the microphone pressures produced by speaker signals.

        :param x: speaker signals, shape (L, n_samples).
        :param h_list: time-domain impulse responses, shape (M, L, n_filter).
        :return: simulated pressures, shape (M, n_samples).
        """
        filter_len = h_list.shape[2]
        # circular padding along time, mirroring Inv_filtering
        x2 = np.hstack((x[:, -filter_len + 1:], x, x[:, :filter_len]))
        p_pred = []
        L, M = 8, 8
        for m in range(M):  # mic
            temp = []
            for l in range(L):  # speaker
                temp.append(convolve(x2[l, :], h_list[m, l, :], mode='valid', method='auto'))
            p_pred.append(np.array(temp).sum(axis=0)[:-filter_len])
        return np.array(p_pred)

    @timer
    def Validate(self, target, p_pred):
        """Per-microphone mean 1/3-octave SPL error between target and
        simulated pressure.

        :param target: target field, shape (n_samples, 8).
        :param p_pred: simulated field, shape (8, n_samples).
        :return: array of 8 per-channel mean SPL errors in dB.
        """
        Error = []
        for i in range(8):
            Error.append(self.Compair_bars(target[:, i], p_pred[i, :]))
        return np.array(Error)

    def check_EXPIRE_DAYS(self):
        """Raise ``RuntimeError`` once the trial period has expired."""
        EXPIRE_DAYS = datetime(2025, 10, 26, 0, 0, 0)
        # Direct datetime comparison replaces the timestamp round-trip.
        if datetime.now() > EXPIRE_DAYS:
            # Bug fix: the original executed `raise ('...')`, i.e. tried to
            # raise a plain string, which is itself a TypeError at runtime and
            # hid the intended expiry message.
            raise RuntimeError('软件试用有效期至:' + str(EXPIRE_DAYS) + '，请联系技术人员续期！')

    def CPB(self, x, fs=48000):
        """1/3-octave band spectrum (Constant Percentage Bandwidth).

        The power spectrum is taken from ``convolve(x, x)`` and an FFT, then
        summed over each third-octave band and converted to dB re (2e-5)^2.

        NOTE(review): ``convolve(x, x)`` is x convolved with itself, not the
        autocorrelation (which would time-reverse one copy) -- confirm intent.
        Since Compair_bars only differences two spectra computed identically,
        the relative error may still be meaningful.

        :param x: input signal (1-D array or column vector).
        :param fs: sampling rate in Hz.
        :return: (band centre frequencies, band levels in dB).
        """
        nperseg = 2 ** (int(np.log2(len(x))))  # largest power of two <= len(x)
        # 1/3-octave centre frequencies (16 Hz .. 20 kHz)
        center_freqs = np.array([1.60e+01, 2.00e+01, 2.50e+01, 3.15e+01, 4.00e+01, 5.00e+01, 6.30e+01, 8.00e+01,
                                 1.00e+02, 1.25e+02, 1.60e+02, 2.00e+02, 2.50e+02, 3.15e+02, 4.00e+02, 5.00e+02,
                                 6.30e+02, 8.00e+02, 1.00e+03, 1.25e+03, 1.60e+03, 2.00e+03, 2.50e+03, 3.15e+03,
                                 4.00e+03, 5.00e+03, 6.30e+03, 8.00e+03, 1.00e+04, 1.25e+04, 1.60e+04, 2.00e+04])
        # band edges: centre * 2^(-1/6) .. centre * 2^(+1/6)
        f_start = 2 ** (-1 / 2 / 3) * center_freqs  # band lower edge
        f_stop = 2 ** (1 / 2 / 3) * center_freqs  # band upper edge
        m = len(center_freqs)
        delta_f = fs / len(x)  # FFT frequency resolution
        # power spectrum from the self-convolution (see NOTE above)
        Rxx = convolve(x, x, mode='full') / len(x)
        X_psd = np.abs(np.fft.fft(Rxx, nperseg, axis=0)[:nperseg // 2] / nperseg * 2)
        freq = np.linspace(0, 1, len(X_psd)) * fs // 2  # frequency axis
        freq_axis = []
        y_out = []
        for i in range(m):  # accumulate band levels per 1/3-octave band
            index = np.where((freq >= f_start[i]) & (freq < f_stop[i]))
            if len(index[0]) != 0:
                data = np.take(X_psd, index)[0] * delta_f
                freq_axis.append(center_freqs[i])
                # integrate the in-band power; reference is (2e-5 Pa)^2 = 4e-10
                y_out.append(10 * np.log10(np.sum(data) / (4 * 1e-10)))
        return np.array(freq_axis), np.array(y_out)

    def symmetry_IFFT(self, Var, n):
        """Mirror a half spectrum to conjugate symmetry along axis 0 and
        inverse-FFT it, returning the real time-domain signal.

        :param Var: half spectrum, frequency on axis 0 (2-D or 3-D array).
        :param n: IFFT length.
        :return: real part of the IFFT along axis 0.
        """
        # [1:-1] on the flipped copy drops the duplicated Nyquist and DC bins.
        if Var.ndim == 3:
            temp = np.vstack((np.array(Var), np.flipud(np.array(Var)).conj()[1:-1, :, :]))
        elif Var.ndim == 2:
            temp = np.vstack((np.array(Var), np.flipud(np.array(Var)).conj()[1:-1, :]))
        else:
            temp = []
        return np.real(ifft(temp, n=n, axis=0))

    def Compair_bars(self, X, Y):
        """Mean absolute difference between the 1/3-octave spectra of X and Y.

        :return: average SPL error in dB across the shared bands.
        """
        fx1, SPL1 = self.CPB(X.reshape(-1, 1))
        fx2, SPL2 = self.CPB(Y.reshape(-1, 1))
        average_Error = np.abs(SPL2 - SPL1).mean()
        return average_Error

    def save_data(self, data_path, data):  # data_path = r'C:\Users\TXH\Desktop\SFR\random_in.wav'
        """Write *data* as a 48 kHz float wav at *data_path*.

        soundfile picks the container from the suffix, so the file is first
        written as '<stem>.wav' and then renamed to the requested path (which
        may carry a different extension).

        :param data_path: destination path (any extension).
        :param data: samples to write.
        """
        file_name, ext = os.path.splitext(data_path)
        sav_wav_path = file_name + '.wav'
        if os.path.exists(data_path):
            os.remove(data_path)  # make room for the rename below
        sf.write(sav_wav_path, data, 48000, subtype='FLOAT')
        if sav_wav_path != data_path:
            os.rename(sav_wav_path, data_path)

    def write_log_data(self, log_data):
        """Overwrite temp/log_data.txt with the given strings.

        NOTE(review): this writes to a relative 'temp' directory, not to
        self.temp_path, and no newlines are inserted between items --
        confirm both are intended.

        :param log_data: iterable of strings.
        """
        with open('temp/log_data.txt', 'w+') as f:
            # writelines batches the writes; it adds no separators, matching
            # the original per-item loop exactly.
            f.writelines(log_data)
