import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
import pickle
import numpy as np
import math
import os

from sklearn.preprocessing import MinMaxScaler

from src.config import config, params
from src.datas.crossValidation import fractionaldata
from src.datas.segmentationLabelsAndSample import labelAndSample

# Default hyper-parameter set from src.config.params, loaded once at import time.
# NOTE(review): `hps` is not referenced anywhere in this module — confirm it is
# needed (e.g. for import side effects) before removing.
hps = params.get_default_params()


def load_std_map_data(std_file):
    """Load one tab-separated std/map file and return (X, sample_id_list).

    Layout of the file (no header):
      - row 0, columns 1..-2: sample identifiers
      - column -1, rows 1.. : feature keys, one per row
      - interior cells      : feature values (features x samples)

    The value matrix is transposed to samples x features, cast to float and
    aligned to the canonical feature order via indefinitelengthconversion.
    """
    raw = pd.read_csv(std_file, sep="\t", header=None)
    sample_id_list = raw.iloc[0, 1:-1].values       # sample identifiers
    feature_keys = raw.iloc[1:, -1].values          # one key per feature row
    feature_values = raw.iloc[1:, 1:-1].values      # raw feature matrix
    aligned = feature_values.T.astype(float)        # -> samples x features
    aligned = indefinitelengthconversion(aligned, feature_keys)
    return aligned, sample_id_list


def std_map_txt_to_save(std_file_list):
    """Read std/map text files and persist the model-ready tensor as a pickle.

    Pipeline:
      1. Load every file and concatenate samples (later files first — this
         preserves the historical prepend ordering of the original loop).
      2. Run the pre-fitted variance selector and NMF on the RAW matrix,
         log-normalize those outputs, then log-normalize the raw matrix
         itself and run PCA/LDA/ICA/LLE on the normalized matrix.
         (normalization_data mutates its argument in place — order matters.)
      3. Stack all feature blocks column-wise, zero-pad the feature axis up
         to a multiple of 25, and reshape to (samples, 25, cols, 1).
      4. Pickle (tensor, sample_ids) to config.production_pkl.
    """
    matrices, id_arrays = [], []
    for path in std_file_list:
        X, sample_ids = load_std_map_data(path)
        matrices.append(X)
        id_arrays.append(sample_ids)
    # Historical behavior: each new file was prepended, so concatenate reversed.
    result_x = np.concatenate(matrices[::-1], axis=0)
    result_sample_id = np.concatenate(id_arrays[::-1], axis=0)

    need_zeros = np.zeros((result_x.shape[0],))
    # selector/nmf see the raw matrix; everything below sees the normalized one.
    selector_x, _ = normalization_data(selector_train(result_x), None)
    nmf_x, _ = normalization_data(nmf_train(result_x), None)
    result_x, _ = normalization_data(result_x, None)
    pca_x = pca_train(result_x)
    lda_x = lda_train(result_x)
    ica_x = ica_train(result_x)
    lle_x = lle_train(result_x)
    result_x = np.concatenate(
        (pca_x, lda_x, selector_x, ica_x, lle_x, result_x, nmf_x), axis=1)

    # Zero-pad the feature axis so it reshapes cleanly to a 25 x two_dimen grid.
    one_dimen = 25
    two_dimen = math.ceil(result_x.shape[1] / one_dimen)
    for _ in range(one_dimen * two_dimen - result_x.shape[1]):
        result_x = np.insert(result_x, result_x.shape[1], values=need_zeros, axis=1)
    result_x = np.reshape(result_x, (result_x.shape[0], one_dimen, two_dimen, 1))

    # Fix: the output file handle was previously leaked; close it deterministically.
    with open(config.production_pkl, "wb") as fh:
        pickle.dump((result_x, result_sample_id), fh)

def lle_train(X):
    """Transform X with the pre-fitted LLE (locally linear embedding) model.

    Despite the historical "_train" name, nothing is fitted here: the model
    is loaded from config.lle_pkl and only `transform` is called.

    Fix: the pickle file handle was opened without ever being closed; use a
    context manager so it is released deterministically.
    """
    with open(config.lle_pkl, 'rb') as fh:
        lle = pickle.load(fh)
    return lle.transform(X)

def ica_train(X):
    """Transform X with the pre-fitted ICA model loaded from config.ica_pkl.

    Fix: close the pickle file handle (previously leaked) via `with`.
    """
    with open(config.ica_pkl, 'rb') as fh:
        ica = pickle.load(fh)
    return ica.transform(X)

def nmf_train(X):
    """Transform X with the pre-fitted NMF model loaded from config.nmf_pkl.

    Fix: close the pickle file handle (previously leaked) via `with`.
    NOTE(review): pickle.load is only safe on trusted model files.
    """
    with open(config.nmf_pkl, 'rb') as fh:
        nmf = pickle.load(fh)
    return nmf.transform(X)

def selector_train(X):
    """Filter X with the pre-fitted variance selector from config.selector_pkl.

    Keeps only the feature columns the persisted selector retained
    (per the original comment: variance greater than 1).

    Fix: close the pickle file handle (previously leaked) via `with`.
    """
    with open(config.selector_pkl, 'rb') as fh:
        selector = pickle.load(fh)
    return selector.transform(X)


def normalization_data(X_train, X_test):
    """Log-scale the nonzero entries of X_train in place.

    Every nonzero entry x becomes log(x) / log(max(X_train)), mapping the
    matrix maximum to 1.0; zeros are left untouched. X_test is returned
    unchanged (kept for interface compatibility with callers that pass a
    test split).

    Fix: replaces the Python-level double loop with vectorized NumPy masking
    — same values, far faster on large matrices. X_train must be a NumPy
    array (every call site in this module passes one).

    NOTE(review): like the original, this divides by zero if
    max(X_train) == 1 and expects strictly non-negative inputs.
    """
    maxv = math.log(np.max(X_train))
    nonzero = X_train != 0
    # In-place update so callers holding a reference see the normalized data,
    # exactly as the original element-wise loop behaved.
    X_train[nonzero] = np.log(X_train[nonzero]) / maxv
    return X_train, X_test

def lda_train(X):
    """Transform X with the pre-fitted LDA model loaded from config.lda_pkl.

    Fix: close the pickle file handle (previously leaked) via `with`.
    """
    with open(config.lda_pkl, 'rb') as fh:
        lda = pickle.load(fh)
    return lda.transform(X)


def pca_train(X):
    """Transform X with the pre-fitted PCA model loaded from config.pca_pkl.

    Fix: close the pickle file handle (previously leaked) via `with`.
    """
    with open(config.pca_pkl, 'rb') as fh:
        pca = pickle.load(fh)
    return pca.transform(X)


def delete_not_has_attr(X, sample_features_key_list, features_key_list):
    """Drop columns of X whose feature key is not in features_key_list.

    Returns the filtered (X, sample_features_key_list) pair with column
    order preserved.

    Fix: the original rebuilt `tolist().index()` and called `np.delete` once
    per dropped key — accidentally O(n^2). A single `np.isin` boolean mask
    produces the same result in one pass.
    """
    keep = np.isin(sample_features_key_list, features_key_list)
    return X[:, keep], sample_features_key_list[keep]


def indefinitelengthconversion(X, sample_features_key_list):
    """Align the columns of X to the persisted canonical feature-key order.

    Loads the canonical key list from config.features_key_list_file, drops
    columns whose key is not canonical, then builds an output matrix whose
    column i holds the sample's values for features_key_list[i] — or an
    all-zero column when the sample lacks that key. The result always has
    exactly len(features_key_list) columns.

    Fixes over the original insert/delete loop:
      * Off-by-one corruption: after `np.insert` at position i, the source
        column shifted from v_index to v_index + 1, but the code deleted at
        v_index — destroying the wrong column (and duplicating another)
        whenever sample keys arrived out of canonical order. The direct
        key -> column mapping below cannot mis-delete.
      * The pickle file handle is now closed via `with` (was leaked).

    NOTE(review): assumes sample feature keys are unique; on duplicates the
    first occurrence wins (the original's bookkeeping was ambiguous there).
    """
    with open(config.features_key_list_file, 'rb') as fh:
        features_key_list = pickle.load(fh)
    X, sample_features_key_list = delete_not_has_attr(
        X, sample_features_key_list, features_key_list)
    n_samples = X.shape[0]
    zero_col = np.zeros((n_samples,))
    # First occurrence wins: iterate reversed so earlier columns overwrite.
    key_to_col = {k: j for j, k in reversed(list(enumerate(sample_features_key_list)))}
    columns = [
        X[:, key_to_col[v]] if v in key_to_col else zero_col
        for v in features_key_list
    ]
    if not columns:
        return np.empty((n_samples, 0))
    return np.stack(columns, axis=1)


if __name__ == '__main__':
    # Script entry point: convert the raw std/map export into the production pickle.
    input_files = ["../../data/originData/yansu177-2.txt"]
    std_map_txt_to_save(input_files)
