import os
import shutil
from patient_information import find_patient_files,load_patient_data,get_grade,get_murmur
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from python_speech_features import logfbank
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import wave
import librosa.display
import librosa
import soundfile
from spafe.features.gfcc import erb_spectrogram
from spafe.utils.vis import show_spectrogram
from spafe.utils.preprocessing import SlidingWindow
# x, fs = librosa.load('data_5fold_new2/3_fold/train_data/2530_AV_Absent_2.wav', sr=4000)

def schmidt_spike_removal(original_signal, fs = 4000):
    """Remove transient noise spikes from a PCG signal (Schmidt et al. style).

    The signal is chopped into 0.25-s windows; while any window's maximum
    absolute amplitude (MAA) exceeds 3x the median MAA, the spike region in
    the worst window (bounded by the nearest zero crossings) is flattened
    to 0.0001. Trailing samples that don't fill a whole window are passed
    through untouched.

    Parameters:
        original_signal: 1-D array-like PCG waveform (not modified).
        fs: sampling rate in Hz; window size is round(fs/4) samples.

    Returns:
        1-D numpy array of the same length as ``original_signal``.
    """
    windowsize = int(np.round(fs / 4))
    trailingsamples = len(original_signal) % windowsize
    # .copy() matters: np.reshape of a slice returns a view, and without it
    # the in-place spike blanking below would mutate the caller's array.
    sampleframes = np.reshape(
        original_signal[0:len(original_signal) - trailingsamples], (-1, windowsize)
    ).copy()
    MAAs = np.max(np.abs(sampleframes), axis=1)
    while np.any(MAAs > np.median(MAAs) * 3):
        window_num = np.argmax(MAAs)
        spike_position = np.argmax(np.abs(sampleframes[window_num, :]))
        zero_crossing = np.abs(np.diff(np.sign(sampleframes[window_num, :])))
        if len(zero_crossing) == 0:
            zero_crossing = np.array([0])
        zero_crossing = np.append(zero_crossing, 0)
        # Spike start: last zero crossing at or before the spike (else window start).
        nz_before = np.nonzero(zero_crossing[:spike_position + 1])[0]
        spike_start = nz_before[-1] if len(nz_before) > 0 else 0
        zero_crossing[0:spike_position + 1] = 0
        # Spike end: first zero crossing after the spike; if the window has
        # none (previously an IndexError), blank through the window's end.
        nz_after = np.nonzero(zero_crossing)[0]
        spike_end = nz_after[0] if len(nz_after) > 0 else windowsize
        sampleframes[window_num, spike_start:spike_end] = 0.0001
        MAAs = np.max(np.abs(sampleframes), axis=1)
    despiked_signal = sampleframes.flatten()
    # Re-append the leftover tail; the original `len(despiked_signal) + 1:`
    # off-by-one silently dropped one sample.
    despiked_signal = np.concatenate([despiked_signal, original_signal[len(despiked_signal):]])
    return despiked_signal

def cut_copy_files(data_directory: str, ident: str, out_directory: str) -> None:
    """Copy one patient's metadata and split their recordings into 3-s clips.

    For every file in ``data_directory`` whose name starts with ``ident``:
    a ``.txt`` metadata file is copied verbatim to ``out_directory``; each
    ``.wav`` recording is loaded at 4 kHz, optionally trimmed, cut into
    non-overlapping 3-second segments, and written out as
    ``<patientID>_<location>_<grade>_<n>.wav``.

    Parameters:
        data_directory: directory holding the raw patient files.
        ident: patient identifier prefix (also names ``<ident>.txt``).
        out_directory: existing directory the clips are written into.
    """
    for f in os.listdir(data_directory):
        if not f.startswith(ident):
            continue
        root, extension = os.path.splitext(f)
        if extension == '.txt':
            shutil.copy(os.path.join(data_directory, f), out_directory)
        elif extension == '.wav':
            # Patient ID and murmur grade come from the metadata file; the
            # auscultation location is the second '_'-token of the filename.
            with open(os.path.join(data_directory, ident + '.txt'), 'r') as txt_f:
                txt_data = txt_f.read()
            patient_ID = txt_data.split('\n')[0].split()[0]
            grade = get_grade(txt_data)
            location = root.split('_')[1]
            recording, fs = librosa.load(os.path.join(data_directory, f), sr=4000)
            # For recordings of at least 6 s, drop the first 2 s and last 1 s
            # (NOTE(review): presumably to discard sensor-placement noise at
            # the edges — confirm against the original experiment).
            if len(recording) / (3 * 4000) >= 2:
                recording = recording[2 * fs:len(recording) - fs]
            segment_len = 3 * fs
            num_cut = int(len(recording) // segment_len)  # whole 3-s segments
            for num in range(num_cut):
                segment = recording[num * segment_len:(num + 1) * segment_len]
                out_name = patient_ID + '_' + str(location) + '_' + str(grade) + '_' + str(num) + '.wav'
                soundfile.write(os.path.join(out_directory, out_name), segment, fs)

def stratified_test_vali_split(
    stratified_features: list,
    data_directory: str,   # path of the raw dataset
    out_directory: str,
    num_fold: int,
    random_state: int,
):
    """Build stratified K-fold train/validation splits of patient recordings.

    Reads every patient's metadata from ``data_directory``, skips patients
    whose murmur status is 'Unknown', one-hot encodes the murmur grade
    (Absent/Soft/Loud), then splits patients with ``StratifiedKFold`` on the
    columns named in ``stratified_features``. For each fold it creates
    ``<out_directory>/<k>_fold/{train_data,vali_data}`` and fills them via
    ``cut_copy_files``.

    Parameters:
        stratified_features: column names joined into the stratification key.
        data_directory: directory with the raw patient .txt/.wav files.
        out_directory: output root; deleted and recreated if it exists.
        num_fold: number of cross-validation folds.
        random_state: seed passed to StratifiedKFold.
    """
    # Recreate the output directory from scratch.
    if os.path.exists(out_directory):
        shutil.rmtree(out_directory)
    os.makedirs(out_directory)
    # Gather per-patient murmur metadata.
    patient_files = find_patient_files(data_directory)  # sorted patient .txt files
    num_patient_files = len(patient_files)
    murmur_classes = ["Absent", "Soft", "Loud"]
    num_murmur_classes = len(murmur_classes)
    murmurs = list()
    patient_ID = list()
    for i in tqdm(range(num_patient_files)):
        current_patient_data = load_patient_data(patient_files[i])
        # Skip patients whose murmur status is 'Unknown'.
        if get_murmur(current_patient_data) == 'Unknown':
            continue
        patient_ID.append(current_patient_data.split(" ")[0])
        current_murmur = np.zeros(num_murmur_classes, dtype=int)
        murmur = get_grade(current_patient_data)
        if murmur in murmur_classes:
            # One-hot encode: Absent=100, Soft=010, Loud=001.
            current_murmur[murmur_classes.index(murmur)] = 1
        murmurs.append(current_murmur)
    patient_ID = np.vstack(patient_ID)
    print(len(murmurs))  # number of patients after filtering out 'Unknown'
    murmurs = np.vstack(murmurs)
    # Assemble the (ID, one-hot murmur) table used for stratification.
    patients_pd = pd.DataFrame(patient_ID, columns=['ID'])
    murmurs_pd = pd.DataFrame(murmurs, columns=murmur_classes)
    complete_pd = pd.concat([patients_pd, murmurs_pd], axis=1)
    complete_pd["ID"] = complete_pd["ID"].astype(int).astype(str)
    # Single stratification key, e.g. "1-0-0" for an Absent patient.
    complete_pd["stratify_column"] = (
        complete_pd[stratified_features].astype(str).agg("-".join, axis=1)
    )
    print(complete_pd)
    complete_pd_train = complete_pd.reset_index(drop=True)
    # K-fold cross-validation split on the combined key.
    skf = StratifiedKFold(n_splits=num_fold, random_state=random_state, shuffle=True)
    fold_iter = skf.split(complete_pd_train, complete_pd_train["stratify_column"])
    for fold, (train_index, vali_index) in enumerate(fold_iter, start=1):
        kfold_pd_train = complete_pd_train.loc[train_index]
        kfold_pd_vali = complete_pd_train.loc[vali_index]
        kfold_out_directory = os.path.join(out_directory, str(fold) + "_fold")
        if not os.path.exists(kfold_out_directory):
            os.makedirs(kfold_out_directory)
        # Materialize this fold's training and validation clip directories.
        for subset_name, subset_pd in (("train_data", kfold_pd_train),
                                       ("vali_data", kfold_pd_vali)):
            subset_directory = os.path.join(kfold_out_directory, subset_name)
            if not os.path.exists(subset_directory):
                os.makedirs(subset_directory)
                for ident in subset_pd["ID"]:
                    cut_copy_files(data_directory, ident, subset_directory)


def FrameTimeC(frameNum, frameLen, inc, fs):
    """Return the center time (in seconds) of each analysis frame.

    Frame ``i`` (0-based) starts at sample ``i * inc`` and spans ``frameLen``
    samples, so its center is ``(i * inc + frameLen / 2) / fs``.

    Bug fix: the original computed ``(i - 1) * inc`` with an already 0-based
    index (a leftover from a 1-based MATLAB port), which shifted every frame
    time one hop early and made the first frame time negative.
    """
    frame_idx = np.arange(frameNum)
    return (frame_idx * inc + frameLen / 2) / fs
def FrequencyScale(nfilt, fs):
    """Return ``nfilt + 2`` Hz frequencies evenly spaced on the mel scale.

    The points run from 0 Hz up to the Nyquist frequency ``fs / 2``; the two
    extra points are the conventional filterbank edge frequencies.
    """
    # Nyquist frequency expressed in mels.
    mel_max = 2595 * np.log10(1 + (fs / 2) / 700)
    # Evenly spaced mel points, mapped back to Hz.
    mel_grid = np.linspace(0, mel_max, nfilt + 2)
    return 700 * (10 ** (mel_grid / 2595) - 1)

def Log_Mel_Spec(data_directory ):
    """Compute a normalized log-mel filterbank spectrogram for every .wav file.

    Each recording is loaded at 4 kHz, DC-removed and peak-normalized, passed
    through a 64-band log filterbank (25 ms window, 12.5 ms hop, 0-800 Hz),
    transposed to (bands, frames) and min-max normalized via ``feature_norm``.

    Returns a numpy array stacking one feature matrix per .wav file, in
    sorted filename order.
    """
    features = list()
    for name in sorted(os.listdir(data_directory)):
        if os.path.splitext(name)[1] != '.wav':
            continue
        signal, sr = librosa.load(os.path.join(data_directory, name), sr=4000)
        # Remove DC offset, then scale to unit peak amplitude.
        signal = signal - np.mean(signal)
        signal = signal / np.max(np.abs(signal))
        spec = logfbank(signal, sr, winlen=0.025, winstep=0.0125,
                        nfilt=64, nfft=512, lowfreq=0, highfreq=800).T
        features.append(feature_norm(spec))
    return np.array(features)

def Demographic_feature(data_directory):
    # NOTE(review): unfinished stub — `feat` is never populated, the loaded
    # patient text is read but discarded, and there is no return statement
    # (the function implicitly returns None). Complete or remove before use.
    feat =[]
    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension == '.wav':
            # Clip files are named "<patientID>_<location>_...", so the
            # patient ID is the first underscore-separated token.
            the_id = root.split("_")[0].strip()
            txt_data = load_patient_data(os.path.join(data_directory, the_id + '.txt'))



def Log_GF(data_directory ):
    """Compute a normalized log-gammatone (ERB) spectrogram for every .wav file.

    Each recording is loaded at 4 kHz, DC-removed and peak-normalized, then
    converted to a 64-band ERB spectrogram (25 ms Hamming window, 12.5 ms hop,
    25-2000 Hz), log-compressed, transposed to (bands, frames) and min-max
    normalized via ``feature_norm``.

    Returns a numpy array stacking one feature matrix per .wav file, in
    sorted filename order.
    """
    features = list()
    for name in sorted(os.listdir(data_directory)):
        if os.path.splitext(name)[1] != '.wav':
            continue
        signal, sr = librosa.load(os.path.join(data_directory, name), sr=4000)
        # Remove DC offset, then scale to unit peak amplitude.
        signal = signal - np.mean(signal)
        signal = signal / np.max(np.abs(signal))
        spec, _ = erb_spectrogram(signal,
                                  fs=sr,
                                  pre_emph=0,
                                  pre_emph_coeff=0.97,
                                  window=SlidingWindow(0.025, 0.0125, "hamming"),
                                  nfilts=64,
                                  nfft=512,
                                  low_freq=25,
                                  high_freq=2000)
        features.append(feature_norm(np.log(spec.T)))
    return np.array(features)


def LogMel_Energy(data_directory ):
    """Compute a normalized (33, frames) log-mel + log-energy feature per .wav.

    For each recording (loaded at 4 kHz, DC-removed, peak-normalized): a
    32-band log-mel spectrogram and a per-frame log STFT energy row are
    stacked vertically, then min-max normalized via ``feature_norm``.

    Returns a numpy array stacking one feature matrix per .wav file, in
    sorted filename order.
    """
    logmel_energy = list()
    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        x, fs = librosa.load(os.path.join(data_directory, f), sr=4000)
        # Remove DC offset, then scale to unit peak amplitude.
        x = x - np.mean(x)
        x = x / np.max(np.abs(x))
        mel_spec = librosa.feature.melspectrogram(y=x, sr=fs, n_fft=512,
                                                  hop_length=50, win_length=100, n_mels=32)
        logmel = np.log(mel_spec + 1e-7)
        stft_coff = abs(librosa.stft(x, n_fft=512, hop_length=50, win_length=100))
        energy = np.sum(np.square(stft_coff), 0)
        # Bug fix: guard the log with the same epsilon as the mel branch —
        # an all-zero frame previously produced -inf and broke feature_norm.
        energy = np.log(energy + 1e-7)
        logMel_Energy = np.vstack((logmel, energy))
        logmel_energy.append(feature_norm(logMel_Energy))
    return np.array(logmel_energy)


def feature_norm(feat):
    """Min-max normalize a feature matrix to [0, 1] using its global extrema.

    Note the normalization is global (one min/max over the whole array), not
    per row or per column.

    Bug fix: a constant input previously produced 0/0 and returned an array
    of NaNs; it now returns an all-zero float array instead.
    """
    feat = np.asarray(feat)
    fmin = feat.min()
    span = feat.max() - fmin
    if span == 0:
        return np.zeros_like(feat, dtype=float)
    return (feat - fmin) / span

def Three_channel_Mel(data_directory ):
    """Compute a 3-channel log-mel feature (static, delta, delta-delta) per .wav.

    Each recording is loaded at 4 kHz, DC-removed, peak-normalized, and
    converted to a 32-band log filterbank (25 ms window, 12.5 ms hop,
    0-800 Hz). The static features and their first/second-order deltas are
    stacked as channels, giving (3, 32, frames) per file.

    Generalized: the original filled a hard-coded np.zeros((3, 32, 239))
    buffer, which crashed on any clip not producing exactly 239 frames;
    np.stack works for clips of any duration.
    """
    logmelspec = list()
    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        x, fs = librosa.load(os.path.join(data_directory, f), sr=4000)
        # Remove DC offset, then scale to unit peak amplitude.
        x = x - np.mean(x)
        x = x / np.max(np.abs(x))
        fbank_feat = logfbank(x, fs, winlen=0.025, winstep=0.0125,
                              nfilt=32, nfft=512, lowfreq=0, highfreq=800).T
        delta1 = librosa.feature.delta(fbank_feat, order=1)
        delta2 = librosa.feature.delta(fbank_feat, order=2)
        logmelspec.append(np.stack((fbank_feat, delta1, delta2)))
    return np.array(logmelspec)

def delt_feature(data):
    """Stack a feature matrix with its first- and second-order deltas.

    Returns ``data`` concatenated along axis 0 with its order-1 and order-2
    delta features, tripling the number of rows.
    """
    derivatives = [librosa.feature.delta(data, order=k) for k in (1, 2)]
    return np.concatenate([data] + derivatives, axis=0)

def get_Murmur_locations(data):
    """Return the value of the last '#Murmur locations:' line in *data*.

    The value keeps its original '+'-separated form (e.g. 'AV+PV').
    Returns None when no such header line exists.
    """
    hits = [
        line.split(": ")[1].strip()
        for line in data.split("\n")
        if line.startswith("#Murmur locations:")
    ]
    return hits[-1] if hits else None

def get_label(data_directory ):
    """Parse per-clip labels, locations and patient IDs from a clip directory.

    Clip files are named "<patientID>_<location>_<grade>_<n>.wav". A clip is
    labeled with its filename grade only when its auscultation location is
    listed in the patient's '#Murmur locations' metadata; otherwise it is
    labeled 'Absent'. Grades map to ints: Absent=0, Soft=1, Loud=2.

    Returns:
        (labels, locations, ids) as numpy arrays, one entry per .wav file
        in sorted filename order.

    Raises:
        ValueError: if a filename carries a grade outside the known set
        (previously this silently reused the previous clip's grade, or
        raised NameError on the first clip).
    """
    grade_map = {'Absent': 0, 'Soft': 1, 'Loud': 2}
    label = list()
    location = list()
    id = list()
    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        parts = root.split("_")
        the_id = parts[0].strip()
        the_location = parts[1].strip()
        id.append(the_id)
        location.append(the_location)
        # Default to Absent; only trust the filename grade when the metadata
        # says a murmur is audible at this clip's location.
        the_label = 'Absent'
        txt_data = load_patient_data(os.path.join(data_directory, the_id + '.txt'))
        locations_field = get_Murmur_locations(txt_data)
        murmur_locations = locations_field.split("+") if locations_field else []
        if the_location in murmur_locations:
            the_label = parts[2].strip()
        if the_label not in grade_map:
            raise ValueError("Unexpected grade label %r in file %s" % (the_label, f))
        label.append(grade_map[the_label])
    return np.array(label), np.array(location), np.array(id)

def get_index(data_directory ):
    """Return np.array([0, 1, ..., k-1]) where k is the number of .wav files
    in ``data_directory`` — i.e. a running index per clip in the directory.
    """
    wav_total = sum(
        1 for name in os.listdir(data_directory)
        if os.path.splitext(name)[1] == '.wav'
    )
    return np.array(list(range(wav_total)))


if __name__ == "__main__":
    # Pipeline: raw CirCor DigiScope data in -> stratified 5-fold directories
    # of 3-s clips -> per-fold log-mel features and labels saved as .npy.
    data_directory = "/home/dsp610/HZH/2022_challenge_new/the-circor-digiscope-phonocardiogram-dataset-1.0.3/training_data"
    out_directory="data_5fold_new"
    num_fold = 5
    random_state=2023
    stratified_features = ["Absent", "Soft", "Loud"]
    stratified_test_vali_split(stratified_features,data_directory,out_directory,num_fold,random_state)
    files = os.listdir(out_directory)

    # # Extract features for one specific fold only (manual alternative to
    # # the loop below)
    # f='2_fold'
    # train_data_directory = os.path.join(out_directory, f, "train_data")
    # vali_data_directory = os.path.join(out_directory, f, "vali_data")
    # # test_data_directory = os.path.join(out_directory,  "test_data")
    # label_directory = os.path.join(out_directory, f, "label")
    # logmel_directory = os.path.join(out_directory, f, "logmel")

    # train_feature =Log_GF(train_data_directory)
    # vali_feature = Log_GF(vali_data_directory)
    # np.save(logmel_directory + r'/train_loggamma.npy', train_feature)
    # np.save(logmel_directory + r'/vali_loggamma.npy', vali_feature)

    # train_label, train_location, train_id = get_label(train_data_directory)
    # vali_label, vali_location, vali_id = get_label(vali_data_directory)
    # np.save(label_directory + r'/train_label.npy', train_label)
    # np.save(label_directory + r'/vali_label.npy', vali_label)

    # train_index = get_index(train_data_directory)
    # np.save(label_directory + r'/train_index.npy', train_index)



    # For every fold directory, compute features and labels for both splits.
    for f in files:
        if f != '说明.txt':  # skip the dataset's readme file, if present
            train_data_directory = os.path.join(out_directory, f, "train_data")
            vali_data_directory = os.path.join(out_directory, f, "vali_data")
            # test_data_directory = os.path.join(out_directory,  "test_data")
            label_directory = os.path.join(out_directory, f, "label")
            logmel_directory = os.path.join(out_directory, f, "logmel")
            # NOTE(review): features/labels are only (re)computed when their
            # output directory does not exist yet — delete it to force a rerun.
            if not os.path.exists(logmel_directory):
                os.makedirs(logmel_directory)
                train_feature = Log_Mel_Spec(train_data_directory)
                vali_feature = Log_Mel_Spec(vali_data_directory)
                np.save(logmel_directory + r'/train_feature.npy', train_feature)
                np.save(logmel_directory + r'/vali_feature.npy', vali_feature)

            if not os.path.exists(label_directory):
                os.makedirs(label_directory)
                train_label, train_location, train_id = get_label(train_data_directory)
                vali_label, vali_location, vali_id = get_label(vali_data_directory)
                np.save(label_directory + r'/train_label.npy', train_label)
                np.save(label_directory + r'/vali_label.npy', vali_label)

                # Save each clip's auscultation location
                # np.save(label_directory + r'/train_location.npy', train_location)
                np.save(label_directory + r'/vali_location.npy', vali_location)
                # Save each clip's patient ID
                # np.save(label_directory + r'/train_id.npy', train_id)
                np.save(label_directory + r'/vali_id.npy', vali_id)
                # Save each clip's running index
                # train_index = get_index(train_data_directory)
                vali_index = get_index(vali_data_directory)

                # np.save(label_directory + r'/train_index.npy', train_index)
                np.save(label_directory + r'/vali_index.npy', vali_index)
    print("over")
        # else:
        #     test_data_directory = os.path.join(out_directory,  "test_data")
        #     label_directory = os.path.join(out_directory, f, "label")
        #     logmel_directory = os.path.join(out_directory, f, "logmel")
        #     if not os.path.exists(logmel_directory):
        #         os.makedirs(logmel_directory)
        #         test_feature = Log_Mel_Spec(test_data_directory)
        #         np.save(logmel_directory + r'/test_feature.npy', test_feature)
        #     if not os.path.exists(label_directory):
        #         os.makedirs(label_directory)
        #         test_label, test_location, test_id = get_label(test_data_directory)
        #         np.save(label_directory + r'/test_label.npy', test_label)
        #         np.save(label_directory + r'/test_location.npy', test_location)
        #         np.save(label_directory + r'/test_id.npy', test_id)
        #         test_index = get_index(test_data_directory)
        #         np.save(label_directory + r'/test_index.npy', test_index)










