from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, IncrementalPCA, FactorAnalysis, FastICA, NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.manifold import TSNE
from sklearn.feature_selection import VarianceThreshold
import numpy as np
from utils.get_data import read_data, get_train_test_data


class Pretreatment(object):
    """
    Preprocessing pipeline: missing-value screening, per-class mean
    imputation, low-variance filtering, standardization, and a family of
    dimensionality-reduction front-ends (PCA, LDA, ICA, ...).
    """
    @staticmethod
    def dataframe_fill_na(df, y_0_index, y_1_index, col):
        """
        Fill NaNs in one column with the mean of that column within each
        class of the binary label 'y'.

        :param df: DataFrame containing a 'y' column and ``col``
        :param y_0_index: row labels where y == 0
        :param y_1_index: row labels where y == 1
        :param col: name of the column to impute
        :return: the DataFrame with ``col`` imputed (mutated in place)
        """
        # groupby sorts group keys, so iloc[0] is the y == 0 mean and
        # iloc[1] the y == 1 mean (assumes both classes are present).
        y_mean = df.groupby('y')[col].mean()
        zero = y_mean.iloc[0]
        one = y_mean.iloc[1]

        null_index = df[col][df[col].isnull()].index.tolist()
        fill_0_index = list(set(y_0_index) & set(null_index))
        fill_1_index = list(set(y_1_index) & set(null_index))

        # BUG FIX: the original chained assignment (df[col][idx] = value)
        # triggers SettingWithCopyWarning and silently does nothing under
        # pandas copy-on-write; .loc assigns reliably.
        df.loc[fill_0_index, col] = zero
        df.loc[fill_1_index, col] = one

        return df

    @staticmethod
    def check_missing(missing_rate, df):
        """
        Missing-value screen: keep the label column 'y' plus every feature
        whose non-null ratio is at least ``missing_rate``.

        :param missing_rate: minimum fraction of non-null values required
        :param df: DataFrame whose first column is assumed to be the label 'y'
        :return: (filtered DataFrame,
                  kept columns that still contain NaNs and need imputation)
        """
        columns = ['y']
        null_col = []
        for col in list(df.columns)[1:]:
            # Fraction of non-null entries (the original misleadingly
            # called this "null_rate").
            non_null_rate = len(df[col].dropna()) / len(df)
            if non_null_rate >= missing_rate:
                columns.append(col)

            # A kept column with any NaN left must be imputed downstream.
            if col in columns and non_null_rate < 1.0:
                null_col.append(col)

        return df[columns], null_col

    @staticmethod
    def check_std(min_std, df, columns):
        """
        Variance screen: drop the columns of ``df`` (restricted to
        ``columns[1:]``, i.e. excluding the leading 'y') whose standard
        deviation is <= ``min_std``.

        :param min_std: inclusive lower bound on the allowed std
        :param df: DataFrame to filter
        :param columns: candidate column list whose first entry is 'y'
        :return: ``df`` without the low-variance columns
        """
        candidates = list(set(columns[1:]) & set(df.columns.tolist()))
        info = df[candidates].std()
        remove_columns = info[info <= min_std].index.tolist()

        return df.drop(remove_columns, axis=1)

    def pretreatment_base(self, missing_rate=0.8, min_std=0.5):
        """
        Base preprocessing: drop sparse columns, drop rows without gender,
        impute remaining NaNs with per-class means, drop low-variance
        columns, split, and standardize.

        :param missing_rate: minimum non-null ratio a column must have
        :param min_std: columns with std <= min_std are dropped
        :return: (x_train, x_test, y_train, y_test) with X standardized
        """
        data = read_data()  # 11017 rows
        data = data.iloc[:, 1:]

        # Inspect the data profile first; out-of-range values would need
        # to be removed here.
        # data.describe().T.to_excel('test.xlsx')
        # print(data.describe().T)

        # Missing-rate screen: drop columns with too many NaNs.
        data_new, null_col = self.check_missing(missing_rate, data)

        # Rows with no gender (x_001) are dropped outright; gender/age
        # (x_001/x_002) must therefore not be mean-imputed below.
        gender_index = data_new['x_001'][data_new['x_001'].isnull()].index.tolist()
        # BUG FIX: the original unconditional remove() raised ValueError
        # whenever a column had no NaNs and never entered null_col.
        for fixed_col in ('x_001', 'x_002'):
            if fixed_col in null_col:
                null_col.remove(fixed_col)

        data_new = data_new.drop(gender_index)  # 10986 rows

        # Impute the remaining NaNs with the per-class (y) mean.
        y_0_index = data_new['y'][data_new['y'] == 0].index.tolist()
        y_1_index = data_new['y'][data_new['y'] == 1].index.tolist()
        for col in null_col:
            data_new = self.dataframe_fill_na(data_new, y_0_index, y_1_index, col)

        check_column = ['y', 'x_020', 'x_021', 'x_022', 'x_023', 'x_024', 'x_025', 'x_026', 'x_028', 'x_029', 'x_030',
                        'x_031', 'x_032', 'x_034', 'x_035', 'x_036', 'x_037', 'x_038', 'x_039', 'x_040', 'x_041',
                        'x_042', 'x_043', 'x_044', 'x_045', 'x_046', 'x_047', 'x_048', 'x_049', 'x_050', 'x_051',
                        'x_052', 'x_053', 'x_054', 'x_055', 'x_056', 'x_057', 'x_058', 'x_059', 'x_060', 'x_061',
                        'x_062', 'x_063', 'x_064', 'x_065', 'x_066', 'x_067', 'x_068', 'x_069', 'x_070', 'x_071',
                        'x_072', 'x_073', 'x_074', 'x_075',
                        'x_076', 'x_077', 'x_078', 'x_079', 'x_080', 'x_081', 'x_082', 'x_083', 'x_084', 'x_085',
                        'x_086', 'x_087', 'x_088', 'x_089', 'x_090', 'x_091', 'x_092', 'x_093', 'x_094', 'x_095',
                        'x_096', 'x_097', 'x_098', 'x_099', 'x_100', 'x_101', 'x_102', 'x_103', 'x_104', 'x_105',
                        'x_106', 'x_107', 'x_108', 'x_109', 'x_110', 'x_111', 'x_112', 'x_113', 'x_114', 'x_115',
                        'x_116', 'x_117', 'x_118', 'x_119', 'x_120', 'x_121', 'x_122', 'x_123', 'x_124', 'x_125',
                        'x_126', 'x_127', 'x_128', 'x_129', 'x_130', 'x_131', 'x_132', 'x_133', 'x_134', 'x_135',
                        'x_136', 'x_137', 'x_138', 'x_139', 'x_140', 'x_141', 'x_142', 'x_143', 'x_144', 'x_145',
                        'x_146', 'x_147', 'x_148', 'x_149', 'x_150', 'x_151', 'x_152', 'x_153', 'x_154', 'x_155',
                        'x_156', 'x_157', 'x_158', 'x_159', 'x_160', 'x_161', 'x_162', 'x_163', 'x_164', 'x_165',
                        'x_166', 'x_167', 'x_168', 'x_169', 'x_170', 'x_171', 'x_172', 'x_173', 'x_174', 'x_175',
                        'x_176', 'x_177', 'x_178', 'x_179', 'x_180', 'x_181', 'x_182', 'x_183', 'x_184', 'x_185',
                        'x_186', 'x_187', 'x_188', 'x_189', 'x_190', 'x_191', 'x_192', 'x_193', 'x_194', 'x_195',
                        'x_196', 'x_197', 'x_198', 'x_199']
        # Variance screen.
        data_new = self.check_std(min_std, df=data_new, columns=check_column)

        # Correlation screen (disabled).
        # print(data_new.corr())

        x_train, x_test, y_train, y_test = get_train_test_data(df=data_new)

        # BUG FIX: fit the scaler on the training split only and reuse its
        # statistics for the test split; the original fit_transform'ed the
        # test set too, leaking test-set statistics.
        scaler = StandardScaler()
        x_train = scaler.fit_transform(x_train)
        x_test = scaler.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_variance_threshold(self, p=0.8):
        """
        Drop features whose variance is below p * (1 - p) (the variance of
        a Bernoulli(p) variable).

        :param p: Bernoulli probability defining the variance threshold
        :return: (x_train, x_test, y_train, y_test)
        """
        sel = VarianceThreshold(threshold=p * (1 - p))
        x_train, x_test, y_train, y_test = self.pretreatment_base()

        # NOTE(review): pretreatment_base already standardizes; this second
        # pass is redundant but harmless, kept for parity with the original.
        stand_data = StandardScaler()
        x_train = stand_data.fit_transform(x_train)
        # BUG FIX: transform (not re-fit) the test split — no leakage.
        x_test = stand_data.transform(x_test)

        x_train = sel.fit_transform(x_train)
        # BUG FIX: apply the selector fitted on train; separate fits could
        # select different feature subsets for train and test.
        x_test = sel.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_pca(self):
        """PCA to 80 components; fitted on train, applied to test."""
        x_train, x_test, y_train, y_test = self.pretreatment_base()
        # x_train, x_test, y_train, y_test = self.pre_variance_threshold()

        pca = PCA(n_components=80)
        x_train = pca.fit_transform(x_train)
        # BUG FIX: project test onto the train-fitted basis instead of
        # fitting a second, different PCA on the test split.
        x_test = pca.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_ipca(self):
        """Incremental PCA to 65 components; fitted on train, applied to test."""
        x_train, x_test, y_train, y_test = self.pretreatment_base(min_std=1)

        ipca = IncrementalPCA(n_components=65)
        x_train = ipca.fit_transform(x_train)
        # BUG FIX: transform test with the train-fitted model.
        x_test = ipca.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_lda(self):
        """
        Linear discriminant analysis projection (low variance).

        :return: (x_train, x_test, y_train, y_test) projected by LDA
        """
        x_train, x_test, y_train, y_test = self.pretreatment_base(missing_rate=0.55)

        # lda = LinearDiscriminantAnalysis(solver='svd', store_covariance=True)
        lda = LinearDiscriminantAnalysis()
        x_train = lda.fit_transform(x_train, y_train)
        # BUG FIX: the original re-fit LDA on (x_test, y_test), i.e. it used
        # the test labels; project the test split with the train-fitted model.
        x_test = lda.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_qda(self):
        """
        Quadratic discriminant analysis (non-linear boundary).

        NOTE(review): QuadraticDiscriminantAnalysis has no transform(), so
        no reduced features can be produced; like the original this is a
        stub that builds the estimator and returns None.
        """
        x_train, x_test, y_train, y_test = self.pretreatment_base(missing_rate=0.55)

        qda = QuadraticDiscriminantAnalysis()

    def pre_svd(self):
        """Truncated SVD (default 2 components); fitted on train only."""
        x_train, x_test, y_train, y_test = self.pretreatment_base(missing_rate=0.55)

        # U, S, VT = np.linalg.svd(data)
        svd = TruncatedSVD()
        x_train = svd.fit_transform(x_train)  # y is ignored by TruncatedSVD
        # BUG FIX: transform test with the train-fitted model.
        x_test = svd.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_fa(self):
        """Factor analysis to 63 components; fitted on train only."""
        x_train, x_test, y_train, y_test = self.pretreatment_base(missing_rate=0.75)

        fa = FactorAnalysis(n_components=63)
        x_train = fa.fit_transform(x_train)
        # BUG FIX: transform test with the train-fitted model.
        x_test = fa.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_ica(self):
        """FastICA decomposition; fitted on train only."""
        x_train, x_test, y_train, y_test = self.pretreatment_base(min_std=1)

        ica = FastICA()
        x_train = ica.fit_transform(x_train)
        # BUG FIX: transform test with the train-fitted model.
        x_test = ica.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_nmf(self):
        """
        Non-negative matrix factorization to 75 components.

        NOTE(review): NMF requires non-negative input, but
        pretreatment_base returns standardized (zero-mean) data, so this
        will raise at runtime unless the data is shifted first — confirm
        the intended usage.
        """
        x_train, x_test, y_train, y_test = self.pretreatment_base(missing_rate=0.85)

        nmf = NMF(n_components=75, init='random', random_state=0)
        x_train = nmf.fit_transform(x_train)
        # BUG FIX: transform test with the train-fitted factorization.
        x_test = nmf.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_latentda(self):
        """
        Latent Dirichlet allocation to 70 topics.

        NOTE(review): LDA expects non-negative counts; standardized data
        will raise at runtime — confirm the intended usage.
        """
        x_train, x_test, y_train, y_test = self.pretreatment_base()

        latentda = LatentDirichletAllocation(n_components=70, random_state=0)
        x_train = latentda.fit_transform(x_train)
        # BUG FIX: transform test with the train-fitted model.
        x_test = latentda.transform(x_test)
        return x_train, x_test, y_train, y_test

    def pre_tsne(self):
        """
        t-SNE embedding. TSNE has no transform(), so train and test must be
        embedded independently — their coordinates live in unrelated spaces.
        """
        x_train, x_test, y_train, y_test = self.pretreatment_base()

        tsne = TSNE()
        return tsne.fit_transform(x_train), tsne.fit_transform(x_test), y_train, y_test


# Module-level shared instance so callers can reuse one Pretreatment object.
pretreatment = Pretreatment()
