import heapq

import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

from Corr_computer import corr, mic, copula


class ZhengqiLoader():
    """Load and prepare the "zhengqi" (steam) tabular dataset for a CNN/GAN pipeline."""

    def __init__(self, url):
        """Load the raw training data.

        @param url: path to a tab-separated text file readable by pandas.read_table
        """
        super().__init__()
        self.train_data = pd.read_table(url)

    def preprocess(self, batch_size):
        """Clean, reduce, normalize and split the data; build a tf.data pipeline.

        Steps:
          1. Drop outlier rows (keep only rows with V9 > -7.5).
          2. PCA on the first 38 feature columns, keeping 97.5% of variance
             (assumed to yield exactly 25 components so each sample reshapes
             to a 5x5x1 "image" -- TODO confirm; the reshape below fails otherwise).
          3. Min-max scale features and target with independent scalers.
          4. 80/20 train/test split (fixed seed), then a shuffled, batched
             tf.data.Dataset for training.

        @param batch_size: batch size for the training tf.data.Dataset
        @return: (x_train_data, train_dataset, y_train, x_test_data, y_test)
        """
        # Outlier removal: keep rows where feature V9 is above -7.5.
        train_data = self.train_data[self.train_data['V9'] > -7.5]
        train_data = tf.convert_to_tensor(train_data, dtype=tf.float32)
        self.train = train_data[:, 0:38]
        # PCA keeping 97.5% of the variance (expected ~25 components for 5x5 conv input).
        pca = PCA(n_components=0.975)
        self.train_pca = pca.fit_transform(self.train)
        # Target is the last column, reshaped to a column vector for the scaler.
        self.target = train_data[:, -1]
        self.target = np.reshape(self.target, [-1, 1])
        # Normalize features and target separately so each spans [0, 1].
        train_scaler = MinMaxScaler()
        self.train_MinMaxdata = train_scaler.fit_transform(self.train_pca)
        target_scaler = MinMaxScaler()
        self.target_MinMaxdata = target_scaler.fit_transform(self.target)
        # 80/20 split with a fixed seed for reproducibility.
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
            self.train_MinMaxdata, self.target_MinMaxdata,
            test_size=0.2, random_state=0)
        # Reshape flat feature vectors into 5x5 single-channel "images" for the CNN.
        self.x_train_data = tf.reshape(self.x_train, [-1, 5, 5, 1])
        self.x_test_data = tf.reshape(self.x_test, [-1, 5, 5, 1])
        # Prepare the training dataset: shuffle then batch.
        self.train_dataset = tf.data.Dataset.from_tensor_slices((self.x_train_data, self.y_train))
        self.train_dataset = self.train_dataset.shuffle(buffer_size=100).batch(batch_size)

        # Prepare the validation dataset.
        # self.val_dataset = tf.data.Dataset.from_tensor_slices((self.x_test_data, self.y_test))
        # self.val_dataset = self.val_dataset.batch(batch_size)

        return self.x_train_data, self.train_dataset, self.y_train, self.x_test_data, self.y_test

    def discriminator_data_loader(self, top_k=5):
        """Build the GAN discriminator's training input via MIC correlation analysis.

        Selects the top_k features most correlated (by MIC score) with the target
        and concatenates them after the target column, producing a tensor of shape
        [n_samples, top_k + 1] with the target in column 0.

        NOTE(review): must be called after preprocess() -- relies on
        self.x_train / self.y_train set there.

        @param top_k: number of most-correlated features to keep (default 5,
                      matching the original hard-coded behavior)
        @return: Discimin_x_data, the discriminator's training input tensor
        """
        x_train = pd.DataFrame(self.x_train)
        y_train = pd.DataFrame(self.y_train, columns=["target"])

        miclist = mic(x_train, y_train["target"])
        print(miclist)
        # Indices of the top_k MIC scores, highest first. Taking nlargest over the
        # *indices* (keyed by score) avoids the list.index() pitfall of the previous
        # version, where duplicate scores all mapped to the same (first) index.
        data_mic_index = heapq.nlargest(top_k, range(len(miclist)), key=miclist.__getitem__)
        print(data_mic_index)
        Discimin_x_data = self.x_train[:, data_mic_index]
        # Target goes in column 0, followed by the selected feature columns.
        Discimin_x_data = tf.concat([y_train, Discimin_x_data], axis=1)

        # Discimin_train_data = tf.data.Dataset.from_tensor_slices((Discimin_x_data)
        return Discimin_x_data






# if __name__ == '__main__':
#     URL = './zhengqi_train.txt'
#     BACH_SIZE = 10
#     loader = ZhengqiLoader(URL)
#     x_train_data, train_dataset, y_train, x_test_data,y_test = loader.preprocess(BACH_SIZE )
#     # print(train_dataset.shape)
#     a=loader.discriminator_data_loader()
#     print(y_train)
#     print(a)
#     print(a.shape)





