import numpy as np
from scipy.io import wavfile
import wave
from scipy import signal
from sklearn.impute import SimpleImputer
import pandas as pd
import os
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import StandardScaler as sk_StandardScaler
from python_speech_features import mfcc

def read_part_wavefile(filename, second, downsampling_ratio):
    """Read the first `second` seconds of a WAV file, keeping every Nth sample.

    :param filename: path to the WAV file
    :param second: number of seconds to keep from the start of the recording
    :param downsampling_ratio: keep one sample out of every `downsampling_ratio`
    :return: (downsampled samples, time axis in seconds, frame rate in Hz)
    """
    # BUG FIX: the wave handle was never closed; a context manager releases it.
    with wave.open(filename) as wav:
        framerate = wav.getparams().framerate  # sampling frequency (Hz)

    sample_time = 1 / framerate  # time between consecutive samples (s)
    _, audio_sequence = wavfile.read(filename)
    x_seq = np.arange(0, second, sample_time * downsampling_ratio)
    # int() keeps the slice bound valid even for a fractional `second`
    part_audio_sequence = audio_sequence[0:int(framerate * second):downsampling_ratio]

    return part_audio_sequence, x_seq, framerate

def read_wavefile(filename, downsampling_ratio):
    """Read a whole WAV file, keeping every Nth sample.

    :param filename: path to the WAV file
    :param downsampling_ratio: keep one sample out of every `downsampling_ratio`
    :return: (downsampled samples, time axis in seconds, frame rate in Hz)
    """
    # BUG FIX: the wave handle was never closed; a context manager releases it.
    with wave.open(filename) as wav:
        nframes = wav.getparams().nframes      # total number of frames
        framerate = wav.getparams().framerate  # sampling frequency (Hz)

    sample_time = 1 / framerate     # time between consecutive samples (s)
    duration = nframes / framerate  # length of the recording (s)
    _, audio_sequence = wavfile.read(filename)
    x_seq = np.arange(0, duration, sample_time * downsampling_ratio)
    part_audio_sequence = audio_sequence[0:nframes:downsampling_ratio]

    return part_audio_sequence, x_seq, framerate

def RMSE(y1, y2):
    """Root-mean-square error between two equal-length arrays.

    :param y1: reference values (ndarray)
    :param y2: compared values (ndarray)
    :return: sqrt of the mean squared difference, averaged over y1.shape[0]
    """
    diff = y1 - y2
    return np.sqrt((diff ** 2).sum() / y1.shape[0])

def PSNR(rmse, n):
    """Peak signal-to-noise ratio in dB for an n-bit signal.

    :param rmse: root-mean-square error of the reconstruction
    :param n: bit depth; the peak amplitude is 2**n - 1
    :return: PSNR in decibels
    """
    peak = np.power(2, n) - 1
    return 20 * np.log10(peak / rmse)

def MaxMinNormalization(x):
    """Min-max scale `x` into the range [-1, 1].

    :param x: 1-D numeric sequence (list or ndarray)
    :return: float ndarray where min(x) maps to -1 and max(x) maps to 1
    :raises ZeroDivisionError: if all elements of `x` are equal
    """
    x = np.asarray(x, dtype=float)
    x_min = x.min()
    x_max = x.max()
    # Preserve the original contract: a constant input raised
    # ZeroDivisionError via Python float division; numpy would only warn.
    if x_max == x_min:
        raise ZeroDivisionError("float division by zero")
    # Vectorized replacement for the per-element Python comprehension:
    # scale to [0, 1], then stretch to [-1, 1].
    return (x - x_min) / (x_max - x_min) * 2 - 1

def StandardScaler(x):
    """Standardize `x` to zero mean and unit variance (population std).

    :param x: numeric array-like
    :return: (x - mean(x)) / std(x)
    """
    return (x - np.mean(x)) / np.std(x)

def butterworth_bandpass_filtering(data, f, downsampling_ratio):
    """Zero-phase Butterworth band-pass filter for the 20-500 Hz band.

    :param data: 1-D signal (already downsampled by `downsampling_ratio`)
    :param f: original sampling frequency of the recording (Hz)
    :param downsampling_ratio: factor the signal was downsampled by
    :return: the filtered signal, same length as `data`
    """
    # Normalized band edges for scipy.signal.butter.
    # NOTE(review): butter expects cutoffs normalized by the Nyquist rate
    # (half the effective sample rate f / downsampling_ratio); these values
    # look like they normalize by the full rate — confirm a factor of 2
    # was not dropped intentionally.
    band = [downsampling_ratio * 20 / f, downsampling_ratio * 500 / f]

    # 8th-order Butterworth design, applied forward and backward
    # (filtfilt) so the result has zero phase distortion.
    numerator, denominator = signal.butter(8, band, 'bandpass')
    return signal.filtfilt(numerator, denominator, data)

def featureStandardization(feature_value_path):
    """Load a feature CSV and return it standardized to zero mean / unit variance.

    +inf entries are replaced column-wise by that column's largest finite
    value, remaining NaNs are imputed with the column mean, and the result
    is scaled with sklearn's StandardScaler.

    :param feature_value_path: path to a CSV file of numeric features
    :return: ndarray of standardized features, one row per CSV row
    """
    feature_df = pd.read_csv(feature_value_path)

    # Replace every +inf with the largest finite value in its column.
    # BUG FIX: the original used chained indexing
    # (feature_df.iloc[row][col] = value), which assigns into a temporary
    # Series and silently leaves the DataFrame unchanged; .iloc[row, col]
    # writes through to the frame. Also, taking the max via sorted() is
    # unreliable when NaNs are present; np.isfinite excludes them cleanly.
    for col in range(feature_df.shape[1]):
        col_values = feature_df.iloc[:, col].to_numpy()
        inf_rows = np.where(col_values == np.inf)[0]
        if len(inf_rows) == 0:
            continue
        finite = col_values[np.isfinite(col_values)]
        if len(finite) == 0:
            continue  # nothing finite to substitute; leave for the imputer
        replacement = finite.max()
        for row in inf_rows:
            feature_df.iloc[row, col] = replacement

    # NOTE(review): -inf values are not handled (the original ignored them
    # too); confirm the feature extractor can never emit -inf.

    # Impute any remaining NaNs with the column mean.
    df_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
    feature_df = df_mean.fit_transform(feature_df)

    scaler = sk_StandardScaler()
    std_feature = scaler.fit_transform(feature_df)

    # min_max_scaler = MinMaxScaler()
    # std_feature = min_max_scaler.fit_transform(std_feature)

    return std_feature

def get_train_data():
    """Load standardized training features and their aligned labels.

    :return: (x_train, y_train); label -1 (normal sample) is recoded as 0
    """
    train_path = "assets/csvs/train_csv/train_features.csv"
    std_train_features = featureStandardization(train_path)

    label_df = pd.read_csv("./assets/csvs/label.csv")
    # Map each record name to its label value.
    all_label = dict(zip(label_df["label_name"].values,
                         label_df["label_value"].values))

    # Labels follow os.listdir order so they line up with the feature rows.
    train_path = "./assets/train"
    raw_labels = [all_label[file.split(".")[0]]
                  for file in os.listdir(train_path)]

    # Recode -1 (normal sample) as 0.
    train_label = np.array([0 if value == -1 else value
                            for value in raw_labels])

    return std_train_features, train_label

def get_smote_data(x_train, y_train):
    """Oversample the training set with SMOTE.

    :param x_train: training feature matrix
    :param y_train: training labels
    :return: (x_train, y_train) after SMOTE, plus per-label count DataFrames
             before and after resampling
    """
    names = pd.read_csv("assets/csvs/feature_names.csv", names=['name'])['name'].values
    frame = pd.DataFrame(data=x_train, columns=names)
    frame['label'] = y_train
    counts_before = frame.groupby('label').count()

    # Fit SMOTE on features (all but the label column) vs. the label column.
    sampler = SMOTE()
    res_x, res_y = sampler.fit_resample(frame.iloc[:, :-1], frame.iloc[:, -1])
    resampled = pd.concat(
        [pd.DataFrame(res_x, columns=names),
         pd.DataFrame(res_y, columns=['label'])],
        axis=1,
    )
    counts_after = resampled.groupby('label').count()

    return (
        np.array(resampled.iloc[:, :-1]),
        np.array(resampled.iloc[:, -1]),
        counts_before,
        counts_after,
    )

def get_physionet_data():
    """Load standardized PhysioNet validation features and aligned labels.

    :return: (std_features, test_label); label -1 (normal) is recoded as 0
    """
    data_path = "assets/csvs/physionet_valid_csv/physionet_valid_features.csv"
    std_features = featureStandardization(data_path)

    label_path = "assets/csvs/label.csv"
    label_df = pd.read_csv(label_path)
    # Map each record name to its label value.
    all_label = dict(zip(label_df["label_name"].values,
                         label_df["label_value"].values))

    # Labels follow os.listdir order so they line up with the feature rows.
    test_path = "assets/valid/PhysioNet"
    raw_labels = [all_label[file.split(".")[0]]
                  for file in os.listdir(test_path)]

    # Recode -1 (normal sample) as 0.
    test_label = np.array([0 if value == -1 else value
                           for value in raw_labels])

    return std_features, test_label

def get_pascal_data():
    """Load standardized PASCAL validation features and labels.

    :return: (std_features, label); label -1 (normal) is recoded as 0
    """
    data_path = "assets/csvs/pascal_valid_csv/pascal_valid_features.csv"
    std_features = featureStandardization(data_path)
    label_path = "assets/csvs/pascal_valid_csv/pascal_label.csv"
    raw = pd.read_csv(label_path, names=["label"])["label"].values

    # Recode -1 (normal sample) as 0.
    label = np.array([0 if value == -1 else value for value in raw])

    return std_features, label


def get_kaggle_data():
    """Load standardized Kaggle (set a + set b) validation features and labels.

    :return: (x_kaggle, y_kaggle) as plain lists; label -1 (normal) becomes 0
    """
    kaggle_a_path = "assets/csvs/kaggle_valid_csv/kaggle_a_valid_features.csv"
    a_std_features = featureStandardization(kaggle_a_path)
    kaggle_b_path = "assets/csvs/kaggle_valid_csv/kaggle_b_valid_features.csv"
    b_std_features = featureStandardization(kaggle_b_path)

    a_label_path = "assets/csvs/kaggle_valid_csv/a_label.csv"
    label_a = np.array(pd.read_csv(a_label_path, names=["label"])["label"].values)
    b_label_path = "assets/csvs/kaggle_valid_csv/b_label.csv"
    label_b = np.array(pd.read_csv(b_label_path, names=["label"])["label"].values)

    # Concatenate set a and set b, keeping row order (a first, then b).
    x_kaggle = list(a_std_features) + list(b_std_features)

    # Recode -1 (normal sample) as 0.
    y_kaggle = [0 if value == -1 else value
                for value in list(label_a) + list(label_b)]

    return x_kaggle, y_kaggle

if __name__ == "__main__":
    # Smoke test: read one Kaggle recording and print its downsampled samples.
    sample_path = "assets/valid/Kaggle/set_a\\normal__201101070538.wav"
    samples, time_axis, rate = read_wavefile(sample_path, downsampling_ratio=40)
    print(samples)
