import sys

sys.path.append("/home/zxh/otu_classifier/")
from src.utils.fileUtils import del_file_2
import pandas as pd
from sklearn.decomposition import PCA, FastICA,NMF
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import pickle
import numpy as np
import math
import os

from sklearn.preprocessing import MinMaxScaler

from src.config import config, params
from src.datas.crossValidation import fractionaldata
from src.datas.segmentationLabelsAndSample import labelAndSample
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import OneHotEncoder
# from keras.utils import to_categorical
from xgboost import XGBClassifier
from xgboost import plot_importance
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.feature_selection import VarianceThreshold

# Module-wide hyper-parameters; note load_std_map_data mutates hps.num_classes.
hps = params.get_default_params()


def load_std_map_data(std_file, sample_id_map_list):
    """Read one std file and return the aligned feature matrix X and labels Y.

    std layout (tab-separated, no header): row 0 holds sample ids in columns
    1..-2, the last column holds feature keys, the body holds feature values.
    sample_id_map_list is an array of (sample_id, class_label) pairs.

    Side effect: sets hps.num_classes via class_count.
    """
    stdData = pd.read_csv(std_file, sep="\t", header=None)
    sample_id_list = stdData.iloc[0, 1:-1].values  # sample ids
    sample_features_key_list = stdData.iloc[1:, -1].values  # feature keys
    sample_features_value_list = stdData.iloc[1:, 1:-1].values  # feature values
    sample_id_map_dict = dict(sample_id_map_list.tolist())
    Y, classList = [], []
    sample_id_list_tmp = sample_id_list
    for id, val in enumerate(sample_id_list):
        if str(val) in sample_id_map_dict.keys():
            # pandas may parse ids as float64/int64/str; normalise before lookup.
            # NOTE(review): each Y entry is a *set* {id, label} whose element
            # order is undefined — a tuple looks intended; confirm with consumers.
            if type(val) == np.float64:
                Y.append({str(int(val)), sample_id_map_dict[str(int(val))]})
                classList.append(sample_id_map_dict[str(int(val))])
            if type(val) == str:
                Y.append({val, sample_id_map_dict[val]})
                classList.append(sample_id_map_dict[val])
            if type(val) == np.int64:
                Y.append({str(val), sample_id_map_dict[str(val)]})
                classList.append(sample_id_map_dict[str(val)])
        else:
            # Unmapped sample: drop its id and the corresponding value column.
            sample_id_list_tmp_tolist = [str(x) for x in sample_id_list_tmp.tolist()]
            v_index = sample_id_list_tmp_tolist.index(str(val))
            sample_id_list_tmp = np.delete(sample_id_list_tmp, v_index, axis=0)
            sample_features_value_list = np.delete(sample_features_value_list, v_index, axis=1)
    Y = np.array(Y)
    classList = np.array(classList)
    # Y = Y.reshape((Y.shape[0],))  # [[0],[0]] -> [0,0]
    X = sample_features_value_list.T.astype(float)
    X, Y, num_classes = class_count(X, Y, classList)
    hps.num_classes = num_classes
    X = indefinitelengthconversion(X, sample_features_key_list)
    return X, Y


def class_count(X, Y, classList, class_list=["n", "a", "c"]):
    """Keep only the samples whose label appears in class_list.

    Returns (X, Y, number of classes).  Classes are stacked in reverse
    class_list order (last class first), matching the original prepend loop.
    """
    x_parts = [X[classList == label] for label in class_list]
    y_parts = [Y[classList == label] for label in class_list]
    # The original loop prepended each later class, so stack in reverse.
    result_x = np.concatenate(x_parts[::-1], axis=0)
    result_y = np.concatenate(y_parts[::-1], axis=0)
    return result_x, result_y, len(class_list)


def std_map_txt_to_save(std_file_list, map_file_list):
    """Read std and map files, build (X, Y) and persist the validation pickle."""
    # Sample-id -> class mapping; later files are stacked in front,
    # mirroring the original prepend order.
    sample_id_map_list = None
    for map_file in map_file_list:
        map_frame = pd.read_csv(map_file, sep="\t", header=0)
        pairs = map_frame.loc[:, ["#SampleID", "Type"]].values
        if sample_id_map_list is None:
            sample_id_map_list = pairs
        else:
            sample_id_map_list = np.concatenate((pairs, sample_id_map_list), axis=0)

    result_x = result_y = None
    for std_file in std_file_list:
        X, Y = load_std_map_data(std_file, sample_id_map_list)
        if result_x is None:
            result_x, result_y = X, Y
        else:
            result_x = np.concatenate((X, result_x), axis=0)
            result_y = np.concatenate((Y, result_y), axis=0)
    production_train_test_val(result_x, result_y)


def production_train_test_val(result_x, result_y):
    """Shape the features via process_train_test_val and persist them to the validation pickle."""
    X_train, y_train = process_train_test_val(result_x, result_y)
    with open(config.val_pkl, "wb") as fh:
        pickle.dump((X_train, y_train), fh)


def process_train_test_val(result_x, result_y):
    """Build the stacked feature views, zero-pad and reshape to (N, 25, W, 1)."""
    pad_column = np.zeros((result_x.shape[0],))
    # Variance-selected and NMF views are normalised on their reduced output;
    # every other reduction runs on the normalised raw matrix.
    selector_x, _ = normalization_data(selector_train(result_x), None)
    nmf_x, _ = normalization_data(nmf_train(result_x), None)
    result_x, _ = normalization_data(result_x, None)
    views = (
        pca_train(result_x),
        lda_train(result_x, result_y),
        selector_x,
        ica_train(result_x),
        lle_train(result_x),
        result_x,
        nmf_x,
    )
    result_x = np.concatenate(views, axis=1)
    one_dimen = 25
    two_dimen = math.ceil(result_x.shape[1] / one_dimen)
    # Append zero columns until the width is an exact multiple of one_dimen.
    while result_x.shape[1] < one_dimen * two_dimen:
        result_x = np.insert(result_x, result_x.shape[1], values=pad_column, axis=1)
    result_x = np.reshape(result_x, (result_x.shape[0], one_dimen, two_dimen, 1))
    return result_x, result_y


def Quantile_data(X_train):
    """Clip each feature column of X_train to Tukey's IQR fences.

    When config.quantile_pkl exists, previously persisted per-feature bounds
    are reused; otherwise bounds are computed from this data and collected
    for later persistence (see Quantile_data_result).

    Returns (X_train clipped in place, list of per-feature bound dicts).
    """
    features_key_list = pickle.load(open(config.features_key_list_file, 'rb'))
    quantile_dict_list = []
    # Load cached bounds once instead of re-reading the pickle per column.
    has_saved_bounds = os.path.exists(config.quantile_pkl)
    if has_saved_bounds:
        quantile_dict_list = pickle.load(open(config.quantile_pkl, 'rb'))
    for i in range(X_train.shape[1]):
        if has_saved_bounds:
            quantile_dict = getHeigherAndlowerByQuantileList(quantile_dict_list, features_key_list[i])
            higher_v = quantile_dict["higher_v"][0] if type(quantile_dict["higher_v"]) == np.ndarray else quantile_dict[
                "higher_v"]
            lower_v = quantile_dict["lower_v"][0] if type(quantile_dict["lower_v"]) == np.ndarray else quantile_dict[
                "lower_v"]
        else:
            lower_q = np.quantile(X_train[:, i], 0.25, keepdims=True, interpolation='lower')  # Q1
            higher_q = np.quantile(X_train[:, i], 0.75, keepdims=True, interpolation='higher')  # Q3
            int_r = higher_q - lower_q  # inter-quartile range
            k = 1.5
            # BUGFIX: Tukey's fences are Q3 + k*IQR (upper) and Q1 - k*IQR
            # (lower); the bounds were previously built from the wrong quartiles
            # (Q1 + k*IQR / Q3 - k*IQR), clipping far too aggressively.
            higher_v = higher_q + k * int_r
            lower_v = lower_q - k * int_r
            if lower_v < 0:
                lower_v = 0  # abundance values cannot be negative
            quantile_dict = {"key": features_key_list[i], "lower_v": lower_v, "higher_v": higher_v}
            quantile_dict_list.append(quantile_dict)
        X_train[:, i][X_train[:, i] > higher_v] = higher_v
        X_train[:, i][X_train[:, i] < lower_v] = lower_v
    return X_train, quantile_dict_list


def getHeigherAndlowerByQuantileList(quantile_dict_list, key):
    """Return the first bounds dict whose "key" equals *key*, or None if absent."""
    return next((entry for entry in quantile_dict_list if entry["key"] == key), None)


def Quantile_data_result(X_train):
    """Clip X_train to per-feature IQR bounds; persist the bounds on first run."""
    X_train, quantile_dict_list = Quantile_data(X_train)
    if not os.path.exists(config.quantile_pkl):
        with open(config.quantile_pkl, "wb") as fh:
            pickle.dump(quantile_dict_list, fh)
    else:
        print("has quantile_pkl")
    return X_train


def balanced_sample(X, Y):
    """Balance classes 0/1/2 by truncating each class to the smallest class size.

    BUGFIX: the original three non-exclusive `if` blocks could each fire when
    class counts tied, re-indexing the already re-ordered arrays with a stale
    label mask (selecting wrong rows, or raising on a length mismatch).
    Truncating every class to the minimum count is equivalent for the
    non-tied case and correct for ties.

    Returns (X, Y) stacked in class order 0, 1, 2.
    """
    y_lables, _ = labelAndSample(Y)
    masks = [y_lables == c for c in (0, 1, 2)]
    # Local name avoids shadowing the builtin `min` as the original did.
    n_min = min(int(np.sum(mask)) for mask in masks)
    X = np.concatenate([X[mask][:n_min] for mask in masks], axis=0)
    Y = np.concatenate([Y[mask][:n_min] for mask in masks], axis=0)
    return X, Y


def normalization_data(X_train, X_test):
    """Log-scale every non-zero entry of X_train in place.

    Each value v != 0 becomes log(v) / log(max(X_train)); zeros stay zero.
    X_test is passed through unchanged (the MinMaxScaler variant was
    disabled).  NOTE(review): assumes max(X_train) > 1 and all non-zero
    values positive — log(0-or-negative) raises and max == 1 divides by
    zero; confirm inputs are raw positive counts.
    """
    maxv = math.log(np.max(X_train))
    for row in X_train:
        for j, value in enumerate(row):
            if value != 0:
                row[j] = math.log(value) / maxv
    return X_train, X_test


def shuffle_save_pkl(X, Y, shuffle=True):
    """Persist the data as pickles, shuffling into train/test splits when asked.

    shuffle=True: 80/20 train/test split saved to train_pkl and test_pkl
    (train also carries hps.num_classes).  shuffle=False: everything goes
    to the validation pickle unchanged.
    """
    if not shuffle:
        with open(config.val_pkl, "wb") as fh:
            pickle.dump((X, Y), fh)
        return
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
    with open(config.train_pkl, "wb") as fh:
        pickle.dump((X_train, y_train, hps.num_classes), fh)
    with open(config.test_pkl, "wb") as fh:
        pickle.dump((X_test, y_test), fh)


def indefinitelengthconversion_getKey(std_file_list):
    """Collect the union of feature keys across all std files and persist it.

    Prerequisite for indefinitelengthconversion: the saved, deduplicated
    key list defines the canonical column order.
    """
    all_keys = None
    for std_file in std_file_list:
        frame = pd.read_csv(std_file, sep="\t", header=None)
        keys = frame.iloc[1:, -1].values  # feature keys live in the last column
        # Later files are stacked in front, mirroring the original prepend order.
        all_keys = keys if all_keys is None else np.concatenate((keys, all_keys), axis=0)
    features_key_list = np.unique(all_keys)  # deduplicated (and sorted) keys
    with open(config.features_key_list_file, "wb") as fh:
        pickle.dump(features_key_list, fh)


def selector_train(X):
    """Variance-threshold feature selection (threshold 1), cached as a pickle."""
    if os.path.exists(config.selector_pkl):
        selector = pickle.load(open(config.selector_pkl, 'rb'))
    else:
        selector = VarianceThreshold(1)  # keep features whose variance exceeds 1
        selector.fit(X)
        with open(config.selector_pkl, "wb") as fh:
            pickle.dump(selector, fh)
    return selector.transform(X)

def nmf_train(X):
    """Project X onto 3 NMF components, fitting and caching the model once."""
    if os.path.exists(config.nmf_pkl):
        nmf = pickle.load(open(config.nmf_pkl, 'rb'))
    else:
        nmf = NMF(n_components=3).fit(X)
        with open(config.nmf_pkl, "wb") as fh:
            pickle.dump(nmf, fh)
    return nmf.transform(X)

def lle_train(X):
    """Project X onto 36 LLE components (10 neighbours), caching the model."""
    if os.path.exists(config.lle_pkl):
        lle = pickle.load(open(config.lle_pkl, 'rb'))
    else:
        lle = LocallyLinearEmbedding(n_components=36, n_neighbors=10, random_state=42).fit(X)
        with open(config.lle_pkl, "wb") as fh:
            pickle.dump(lle, fh)
    return lle.transform(X)


def lda_train(X, Y):
    """Project X onto 2 LDA components, supervised by labels derived from Y."""
    # Labels are derived even when the cached model is used, matching the
    # original call order (labelAndSample may validate Y).
    y_train_lables, _ = labelAndSample(Y)
    if os.path.exists(config.lda_pkl):
        lda = pickle.load(open(config.lda_pkl, 'rb'))
    else:
        lda = LDA(n_components=2).fit(X, y_train_lables)
        with open(config.lda_pkl, "wb") as fh:
            pickle.dump(lda, fh)
    return lda.transform(X)


def ica_train(X):
    """Project X onto 36 independent components (FastICA), caching the model."""
    if os.path.exists(config.ica_pkl):
        ica = pickle.load(open(config.ica_pkl, 'rb'))
    else:
        ica = FastICA(n_components=36).fit(X)
        with open(config.ica_pkl, "wb") as fh:
            pickle.dump(ica, fh)
    return ica.transform(X)


def pca_train(X):
    """PCA down to min(36, n_samples) components, caching the fitted model."""
    if os.path.exists(config.pca_pkl):
        pca = pickle.load(open(config.pca_pkl, 'rb'))
    else:
        # Cannot extract more components than there are samples.
        pca = PCA(n_components=min(36, X.shape[0])).fit(X)
        with open(config.pca_pkl, "wb") as fh:
            pickle.dump(pca, fh)
    return pca.transform(X)


def delete_not_has_attr(X, sample_features_key_list, features_key_list):
    """Drop every feature column whose key is absent from features_key_list.

    Returns (X without the dropped columns, the surviving keys), preserving
    the original relative order.
    """
    drop_idx = [i for i, key in enumerate(sample_features_key_list)
                if key not in features_key_list]
    X = np.delete(X, drop_idx, axis=1)
    sample_features_key_list = np.delete(sample_features_key_list, drop_idx, axis=0)
    return X, sample_features_key_list


def indefinitelengthconversion(X, sample_features_key_list):
    """Align X's columns to the canonical feature-key order.

    The canonical order is the key list persisted by
    indefinitelengthconversion_getKey: keys this sample lacks become zero
    columns, extra columns are dropped, present keys are moved to their
    canonical positions.

    BUGFIX: the old in-place insert/delete reordering deleted at the stale
    index after an insert at a lower position had shifted the duplicate by
    one, removing the wrong column (and zeroing the real data) whenever a
    sample's keys were out of canonical order.  Rebuilding the matrix from
    a key -> column mapping avoids the index bookkeeping entirely.
    """
    features_key_list = pickle.load(open(config.features_key_list_file, 'rb'))
    need_zeros = np.zeros((X.shape[0],))
    # Map each key the sample actually has to its column (first occurrence wins,
    # matching the original `.index(v)` lookup).
    key_to_col = {}
    for idx, key in enumerate(sample_features_key_list):
        if key not in key_to_col:
            key_to_col[key] = X[:, idx]
    # One column per canonical key: the sample's data, or zeros when absent.
    X = np.stack([key_to_col.get(key, need_zeros) for key in features_key_list], axis=1)
    return X


def noVal_data_create(result_x, result_y):
    """Fallback split when no separate validation set exists; persists the test part.

    Returns (X_train, X_test, y_train, y_test) from an 80/20 split.
    """
    # NOTE: the split seed is tied to the sample count, as in the original.
    split = train_test_split(result_x, result_y, test_size=0.2,
                             random_state=result_x.shape[0])
    X_train, X_test, y_train, y_test = split
    with open(config.test_pkl, "wb") as fh:
        pickle.dump((X_test, y_test), fh)
    return X_train, X_test, y_train, y_test


if __name__ == '__main__':
    # load_data_and_save()

    # Raw std file(s) to convert; paths are relative to this script.
    data_list = [
        "../../data/originData/yansu177-2.txt",
    ]

    # Build features from the std file(s) plus the sample->class map file,
    # then persist the processed data (see std_map_txt_to_save).
    std_map_txt_to_save(data_list,
                        [config.map_file])