import os
from patient_information import get_height,get_weight,load_patient_data,get_pregnancy_status,get_age,compare_strings,get_sex
import numpy as np
import librosa
import warnings
from scipy import signal
from python_speech_features import logfbank
from spafe.features.gfcc import erb_spectrogram
from spafe.utils.vis import show_spectrogram
from spafe.utils.preprocessing import SlidingWindow
warnings.filterwarnings('ignore')

def print_abnormal(data_directory):
    """Print .wav files that are shorter than 3 s or contain amplitude spikes.

    A recording is flagged as spiky when at least one 0.25 s frame has a
    maximum absolute amplitude (MAA) above 3x the median frame MAA — the
    same criterion used by schmidt_spike_removal.

    Parameters
    ----------
    data_directory : str
        Directory containing the .wav recordings.
    """
    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        x, fs = librosa.load(os.path.join(data_directory, f), sr=4000)
        # x = butter_filter(x)
        if len(x) < 12000:  # shorter than 3 s at 4 kHz
            print(f, len(x))
        windowsize = int(np.round(fs / 4))
        trailingsamples = len(x) % windowsize
        sampleframes = np.reshape(x[0: len(x) - trailingsamples], (-1, windowsize))
        MAAs = np.max(np.abs(sampleframes), axis=1)
        # BUG FIX: np.where(cond) returns a 1-tuple, so len(...) was always 1
        # and the original `> 1` test never fired.  Index [0] to count the
        # spiky frames (consistent with schmidt_spike_removal's loop test).
        if len(np.where(MAAs > np.median(MAAs) * 3)[0]) > 0:
            print(f)



def pre_data(data, fs):
    """Center the signal and scale it by its original peak magnitude.

    Note: the divisor is max(|data|) of the *raw* signal, so after mean
    removal the output peak is generally below 1.  `fs` is accepted for
    interface compatibility but unused here.
    """
    # data = schmidt_spike_removal(data,fs)
    # data = butter_filter(data)
    peak = np.max(np.abs(data))
    centered = data - np.mean(data)
    return centered / peak

def butter_filter(data):
    """Zero-phase 4th-order Butterworth band-pass filter.

    Cutoffs are given as fractions of the Nyquist frequency: at fs=4000,
    0.0125 * 2000 = 25 Hz and 0.2 * 2000 = 400 Hz.
    """
    numer, denom = signal.butter(4, [0.0125, 0.2], 'bandpass')
    filtered = signal.filtfilt(numer, denom, data)
    return filtered
def schmidt_spike_removal(original_signal, fs=4000):
    """Remove amplitude spikes from a PCG signal (Schmidt et al., 2010).

    The signal is split into 0.25 s frames.  While any frame's maximum
    absolute amplitude (MAA) exceeds 3x the median MAA, the spike in the
    loudest frame is located and the samples between the surrounding zero
    crossings are replaced with 0.0001.

    Parameters
    ----------
    original_signal : np.ndarray
        1-D input signal (not modified).
    fs : int
        Sampling rate in Hz; the frame length is fs/4 samples.

    Returns
    -------
    np.ndarray
        Despiked signal with the same length as the input.
    """
    windowsize = int(np.round(fs / 4))
    trailingsamples = len(original_signal) % windowsize
    # BUG FIX: reshape of a slice is a view, so writes below used to mutate
    # the caller's array in place; work on a copy instead.
    sampleframes = np.reshape(
        original_signal[0: len(original_signal) - trailingsamples],
        (-1, windowsize)).copy()
    MAAs = np.max(np.abs(sampleframes), axis=1)
    while len(np.where(MAAs > np.median(MAAs) * 3)[0]) != 0:
        window_num = np.argmax(MAAs)
        spike_position = np.argmax(np.abs(sampleframes[window_num, :]))
        zero_crossing = np.abs(np.diff(np.sign(sampleframes[window_num, :])))
        if len(zero_crossing) == 0:
            zero_crossing = np.array([0])
        zero_crossing = np.append(zero_crossing, 0)
        before = np.nonzero(zero_crossing[:spike_position + 1])[0]
        spike_start = before[-1] if len(before) > 0 else 0
        zero_crossing[0:spike_position + 1] = 0
        after = np.nonzero(zero_crossing)[0]
        # BUG FIX: the original indexed [0] on a possibly-empty array and
        # raised IndexError when no zero crossing followed the spike; fall
        # back to the end of the frame.
        spike_end = after[0] if len(after) > 0 else windowsize
        sampleframes[window_num, spike_start: spike_end] = 0.0001
        new_MAAs = np.max(np.abs(sampleframes), axis=1)
        if np.array_equal(new_MAAs, MAAs):
            break  # no progress possible; avoid an infinite loop
        MAAs = new_MAAs
    despiked_signal = sampleframes.flatten()
    # BUG FIX: the original appended original_signal[len(...) + 1:], silently
    # dropping one sample whenever trailing samples existed.
    despiked_signal = np.concatenate(
        [despiked_signal, original_signal[len(despiked_signal):]])
    return despiked_signal

def feature_norm(feat):
    """Min-max scale *feat* into [0, 1] over all of its elements."""
    lo = feat.min()
    hi = feat.max()
    return (feat - lo) / (hi - lo)

def Log_Mel_Spec(data_directory):
    """Compute min-max normalized log-Mel filterbank features per .wav file.

    Each recording yields a (32, n_frames) matrix: 32 Mel filters over
    0-800 Hz, 25 ms windows with a 12.5 ms step.
    """
    features = []
    for fname in sorted(os.listdir(data_directory)):
        stem, ext = os.path.splitext(fname)
        if ext != '.wav':
            continue
        audio, sr = librosa.load(os.path.join(data_directory, fname), sr=4000)
        audio = pre_data(audio, sr)
        fbank = logfbank(audio, sr, winlen=0.025, winstep=0.0125,
                         nfilt=32, nfft=512, lowfreq=0, highfreq=800)
        features.append(feature_norm(fbank.T))
    return np.array(features)

def Log_GF(data_directory):
    """Compute min-max normalized log gammatone (ERB) spectrograms per .wav.

    Each recording yields a (64, n_frames) matrix: 64 ERB filters over
    25-2000 Hz, 25 ms Hamming windows with a 12.5 ms step.
    """
    features = []
    for fname in sorted(os.listdir(data_directory)):
        stem, ext = os.path.splitext(fname)
        if ext != '.wav':
            continue
        audio, sr = librosa.load(os.path.join(data_directory, fname), sr=4000)
        audio = pre_data(audio, sr)
        gSpec, _gfreqs = erb_spectrogram(audio,
                                         fs=sr,
                                         pre_emph=0,
                                         pre_emph_coeff=0.97,
                                         window=SlidingWindow(0.025, 0.0125, "hamming"),
                                         nfilts=64,
                                         nfft=512,
                                         low_freq=25,
                                         high_freq=2000)
        features.append(feature_norm(np.log(gSpec.T)))
    return np.array(features)

def demographic_feature(data_directory):
    """Build a (n_wav, 4) array of normalized demographic features.

    Each row is [age, bmi, sex, pregnancy]: age (months) normalized by
    15*12, BMI capped at and normalized by 30, sex is 1 for female, and
    pregnancy is the raw status.  Missing height/weight fall back to
    dataset-typical values (110 cm / 23 kg).
    """
    rows = []
    for fname in sorted(os.listdir(data_directory)):
        stem, ext = os.path.splitext(fname)
        if ext != '.wav':
            continue
        pid = stem.split("_")[0].strip()
        data = load_patient_data(os.path.join(data_directory, pid + '.txt'))

        # Map the age group to a representative age in months.
        age_group = get_age(data)
        age = 6 * 12  # default: the dataset mode (Child)
        for group, months in (('Neonate', 0.5), ('Infant', 6),
                              ('Child', 6 * 12), ('Adolescent', 15 * 12)):
            if compare_strings(age_group, group):
                age = months
                break

        sex_features = 1 if compare_strings(get_sex(data), 'Female') else 0

        # Height/weight with fallbacks, then BMI rounded and capped at 30.
        height = get_height(data)
        weight = get_weight(data)
        if np.isnan(height):
            height = 110
        if np.isnan(weight):
            weight = 23
        bmi = np.round(weight / ((height / 100) ** 2), 2)
        if bmi > 30:
            bmi = 30

        is_pregnant = get_pregnancy_status(data)

        rows.append(np.hstack(([age / (15 * 12)], [bmi / 30],
                               [sex_features], [is_pregnant])))

    return np.asarray(rows)




def statistical_feature(data_directory):
    """Extract the hand-crafted statistical feature vector for each .wav."""
    rows = []
    for fname in sorted(os.listdir(data_directory)):
        if os.path.splitext(fname)[1] != '.wav':
            continue
        audio, sr = librosa.load(os.path.join(data_directory, fname), sr=4000)
        rows.append(hand_fea(pre_data(audio, sr)))
    return np.array(rows)



def get_zrc(sig):
    """Return the mean and std of the min-max normalized zero-crossing rate.

    Frames are 100 samples with a 50-sample hop (25 ms / 12.5 ms at 4 kHz).
    """
    rate = librosa.feature.zero_crossing_rate(y=sig, frame_length=100, hop_length=50)
    rate = max_min(rate)
    flat = np.squeeze(rate)
    return np.mean(flat), np.std(flat)

def max_min(feat):
    """Min-max normalize *feat* into the [0, 1] range."""
    span = feat.max() - feat.min()
    return (feat - feat.min()) / span


def get_spec(x):
    """Mean/std of normalized spectral centroid, bandwidth and frame energy.

    Returns a 6-tuple: (centroid_mean, centroid_std, bandwidth_mean,
    bandwidth_std, energy_mean, energy_std), each computed after min-max
    normalizing the per-frame values.
    """
    centroid = max_min(librosa.feature.spectral_centroid(
        y=x, sr=4000, n_fft=512, win_length=100, hop_length=50))
    bandwidth = max_min(librosa.feature.spectral_bandwidth(
        y=x, sr=4000, n_fft=512, win_length=100, hop_length=50))
    magnitude = abs(librosa.stft(x, n_fft=512, hop_length=50, win_length=100))
    energy = max_min(np.sum(np.square(magnitude), 0))
    return (np.mean(centroid), np.std(centroid),
            np.mean(bandwidth), np.std(bandwidth),
            np.mean(energy), np.std(energy))

def hand_fea(sig):
    """Concatenate the hand-crafted time/frequency statistics into one vector.

    Order: zcr mean/std, spectral centroid mean/std, bandwidth mean/std,
    energy mean/std (8 values).
    """
    features = (*get_zrc(sig), *get_spec(sig))
    return np.asarray(features)

def get_wavname(data_directory):
    """Return the sorted stem names of all .wav files in *data_directory*."""
    stems = [os.path.splitext(f)[0]
             for f in sorted(os.listdir(data_directory))
             if os.path.splitext(f)[1] == '.wav']
    return np.array(stems)

def get_wav(data_directory):
    """Load and normalize every .wav in *data_directory* (sorted order)."""
    signals = []
    for fname in sorted(os.listdir(data_directory)):
        if os.path.splitext(fname)[1] != '.wav':
            continue
        audio, sr = librosa.load(os.path.join(data_directory, fname), sr=4000)
        signals.append(pre_data(audio, sr))
    return np.array(signals)

def get_label(data_directory):
    """Parse 3-class murmur grades (0=Absent, 1=Soft, 2=Loud) per recording.

    File names are expected as '<id>_<location>_<grade>.wav'.  A location
    that is not listed in the patient's '#Murmur locations:' line is always
    labelled Absent, regardless of the grade embedded in the file name.

    Returns
    -------
    (labels, locations, ids) : tuple of np.ndarray
        Aligned per .wav file.

    Raises
    ------
    ValueError
        If a grade outside {Absent, Soft, Loud} is encountered.
    """
    grade_map = {'Absent': 0, 'Soft': 1, 'Loud': 2}
    labels = list()
    locations = list()
    wav_ids = list()

    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        parts = root.split("_")
        the_location = parts[1].strip()
        locations.append(the_location)

        the_id = parts[0].strip()
        wav_ids.append(the_id)

        # Auscultation locations without a murmur are labelled Absent.
        the_label = 'Absent'
        txt_data = load_patient_data(os.path.join(data_directory, the_id + '.txt'))
        murmur_locations = get_Murmur_locations(txt_data).split("+")
        if the_location in murmur_locations:
            the_label = parts[2].strip()

        if the_label not in grade_map:
            # BUG FIX: the original silently reused the grade of the previous
            # file for unknown labels (and raised UnboundLocalError when the
            # first file's label was unknown).  Fail loudly instead.
            raise ValueError(f"Unknown murmur grade '{the_label}' in {f}")
        labels.append(grade_map[the_label])

    return np.array(labels), np.array(locations), np.array(wav_ids)

def get_original_label(data_directory):
    """Parse 4-class murmur grades from the patient files per recording.

    Grades come from the '#Systolic murmur grading:' line of the patient's
    .txt file and are mapped as 0='nan' (no murmur at this location),
    1='I/VI', 2='II/VI', 3='III/VI'.  A location that is not listed in
    '#Murmur locations:' always gets grade 0.

    Returns
    -------
    (labels, locations, ids) : tuple of np.ndarray
        Aligned per .wav file.

    Raises
    ------
    ValueError
        If an unmapped grading string is encountered.
    """
    grade_map = {'nan': 0, 'I/VI': 1, 'II/VI': 2, 'III/VI': 3}
    labels = list()
    locations = list()
    wav_ids = list()

    for f in sorted(os.listdir(data_directory)):
        root, extension = os.path.splitext(f)
        if extension != '.wav':
            continue
        parts = root.split("_")
        the_location = parts[1].strip()
        locations.append(the_location)

        the_id = parts[0].strip()
        wav_ids.append(the_id)

        # Locations without a murmur keep the 'nan' (grade 0) label.
        the_label = "nan"
        txt_data = load_patient_data(os.path.join(data_directory, the_id + '.txt'))

        # BUG FIX: the original left patient_grade unbound (or stale from the
        # previous file) when the grading line was missing from the .txt.
        patient_grade = None
        for text in txt_data.split("\n"):
            if text.startswith("#Systolic murmur grading:"):
                patient_grade = text.split(": ")[1].strip()

        murmur_locations = get_Murmur_locations(txt_data).split("+")
        if the_location in murmur_locations and patient_grade is not None:
            the_label = patient_grade

        if the_label not in grade_map:
            # BUG FIX: unknown grades previously reused the previous file's
            # grade silently; fail loudly instead.
            raise ValueError(f"Unknown murmur grade '{the_label}' in {f}")
        labels.append(grade_map[the_label])

    return np.array(labels), np.array(locations), np.array(wav_ids)
def get_Murmur_locations(data):
    """Return the '+'-joined murmur locations from a patient .txt blob.

    Scans every line and keeps the value of the last line that starts with
    '#Murmur locations:'.  Returns None when no such line exists.
    """
    result = None
    for line in data.split("\n"):
        if line.startswith("#Murmur locations:"):
            result = line.split(": ")[1].strip()
    return result
def get_index(data_directory):
    """Return sequential indices (0..N-1), one per .wav file in the directory."""
    wav_files = [f for f in sorted(os.listdir(data_directory))
                 if os.path.splitext(f)[1] == '.wav']
    return np.array(list(range(len(wav_files))))

if __name__ == "__main__":
    # Root directory holding the cross-validation fold folders produced by
    # the (commented-out) stratified split step below.
    out_directory="data_new"
    # stratified_features = ["Absent", "Soft", "Loud"]
    # stratified_test_vali_split(stratified_features,data_directory,out_directory,test_size,num_fold,random_state)
    files = os.listdir(out_directory)

    # Extract labels for one specific fold ('no_fold').
    f='no_fold'
    train_data_directory = os.path.join(out_directory, f, "train_data")
    vali_data_directory = os.path.join(out_directory, f, "vali_data")
    label_directory = os.path.join(out_directory, f, "label")
    feature_directory = os.path.join(out_directory, f, "feature")
    # train_wavtata = get_wav(train_data_directory)
    # vali_wavdata = get_wav(vali_data_directory)

    # train_feature = Log_GF(train_data_directory)
    # vali_feature = Log_GF(vali_data_directory)
    # train_feature = Log_Mel_Spec(train_data_directory)
    # vali_feature = Log_Mel_Spec(vali_data_directory)
    # np.save(feature_directory + r'/TV/train_loggamma.npy', train_feature)
    # np.save(feature_directory + r'/TV/vali_loggamma.npy', vali_feature)
    # Save the original (4-class) murmur-grade labels for this fold.
    train_label, train_location, train_id = get_original_label(train_data_directory)
    vali_label, vali_location, vali_id = get_original_label(vali_data_directory)
    np.save(label_directory + r'/train_origin_label.npy', train_label)
    np.save(label_directory + r'/vali_origin_label.npy', vali_label)
    np.save(label_directory + r'/vali_id.npy', vali_id)
    # vali_index = get_index(vali_data_directory)
    # np.save(label_directory + r'/vali_index.npy', vali_index)
    # train_index = get_index(train_data_directory)
    # np.save(label_directory + r'/train_index.npy', train_index)
    # np.save(feature_directory + r'/train_wavdata.npy', train_wavtata)
    # np.save(feature_directory + r'/vali_wavdata.npy', vali_wavdata)

    # train_feature = statistical_feature(train_data_directory)
    # vali_feature = statistical_feature(vali_data_directory)
    # np.save(statistical_directory + r'/train_static.npy', train_feature)
    # np.save(statistical_directory + r'/vali_static.npy', vali_feature)

    # Process every fold directory; features/labels are only (re)computed
    # when the corresponding output directory does not exist yet.
    for f in files:
        if f != '说明.txt':  # skip the README file
            train_data_directory = os.path.join(out_directory, f, "train_data")
            vali_data_directory = os.path.join(out_directory, f, "vali_data")
            # print_abnormal(train_data_directory)
            label_directory = os.path.join(out_directory, f, "label")
            feature_directory = os.path.join(out_directory, f, "feature")
            statistical_directory = os.path.join(out_directory, f, "statistical_feature")
            # Spectral features: log-Mel and log-gammatone spectrograms.
            if not os.path.exists(feature_directory):
                os.makedirs(feature_directory)
                train_feature = Log_Mel_Spec(train_data_directory)
                vali_feature = Log_Mel_Spec(vali_data_directory)
                np.save(feature_directory + r'/train_logmel.npy', train_feature)
                np.save(feature_directory + r'/vali_logmel.npy', vali_feature)
                train_feature = Log_GF(train_data_directory)
                vali_feature = Log_GF(vali_data_directory)
                np.save(feature_directory + r'/train_loggamma.npy', train_feature)
                np.save(feature_directory + r'/vali_loggamma.npy', vali_feature)

            # Demographic and hand-crafted statistical features.
            if not os.path.exists(statistical_directory):
                os.makedirs(statistical_directory)

                train_feature =demographic_feature(train_data_directory)
                vali_feature = demographic_feature(vali_data_directory)
                np.save(statistical_directory + r'/train_demographic.npy', train_feature)
                np.save(statistical_directory + r'/vali_demographic.npy', vali_feature)

                train_feature = statistical_feature(train_data_directory)
                vali_feature = statistical_feature(vali_data_directory)
                np.save(statistical_directory + r'/train_static.npy', train_feature)
                np.save(statistical_directory + r'/vali_static.npy', vali_feature)

            # 3-class labels plus per-wav metadata.
            if not os.path.exists(label_directory):
                os.makedirs(label_directory)
                train_label, train_location, train_id = get_label(train_data_directory)
                vali_label, vali_location, vali_id = get_label(vali_data_directory)
                np.save(label_directory + r'/train_label.npy', train_label)
                np.save(label_directory + r'/vali_label.npy', vali_label)

                # Save the auscultation location of each wav
                np.save(label_directory + r'/train_location.npy', train_location)
                np.save(label_directory + r'/vali_location.npy', vali_location)
                # Save the patient ID of each wav
                np.save(label_directory + r'/train_id.npy', train_id)
                np.save(label_directory + r'/vali_id.npy', vali_id)
                # Save the position index of each wav
                train_index = get_index(train_data_directory)
                vali_index = get_index(vali_data_directory)

                np.save(label_directory + r'/train_index.npy', train_index)
                np.save(label_directory + r'/vali_index.npy', vali_index)
    print("over")