from torch.utils import data 
import numpy as np
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor, Lambda, Compose
import pandas as pd
# import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from imblearn.over_sampling import RandomOverSampler ,SMOTE
from sklearn import preprocessing
from datasets.base.data_load import DataManager
from sklearn.model_selection import train_test_split


class Maldroid(data.Dataset):
    """Maldroid malware dataset loaded from a single CSV file.

    Reads the CSV at ``root``, shuffles the rows, oversamples minority
    classes with SMOTE, min-max normalizes the features and splits into
    an 80/20 train/test partition.  The first 470 columns are features;
    column 470 holds 1-based class labels (shifted to 0-based here).

    Args:
        root: Path to the CSV file.
        dataidxs: Optional sequence of row indices; when given, the
            training split is restricted to these rows (used to partition
            data across federated clients).
        train: Accepted for torchvision-style API compatibility;
            ``__getitem__`` / ``__len__`` address the training split.
        transform: Optional callable applied to each feature vector.
        target_transform: Optional callable applied to each label.
        download: Unused; accepted for API compatibility.
    """

    def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None,
                 download=False):
        self.root = root
        self.dataidxs = dataidxs
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.download = download

        self.train_data, self.train_targets, self.test_data, self.test_targets = \
            self.__build_truncated_dataset__()

    def __build_truncated_dataset__(self):
        """Load, resample, normalize and split the CSV.

        Returns:
            Tuple ``(x_train, y_train, x_test, y_test)`` of numpy arrays.
        """
        data_np = pd.read_csv(self.root).values
        # Shuffle rows up front so the later split is random.
        data_np = shuffle(data_np)
        # First 470 columns are features; column 470 is a 1-based label.
        x, y = data_np[:, 0:470], data_np[:, 470] - 1

        # Oversample minority classes with SMOTE (fixed seed for
        # reproducibility).
        # NOTE(review): SMOTE and the MinMaxScaler are fitted on the FULL
        # data before the train/test split, so synthetic samples and
        # normalization statistics leak into the test set — confirm this
        # is intentional.
        ros = SMOTE(random_state=0)
        x, y = ros.fit_resample(x, y)

        # Min-max normalize each feature into [0, 1].
        minmax_scaler = preprocessing.MinMaxScaler()
        x = minmax_scaler.fit_transform(x)

        # 80/20 train/test split with a fixed seed.
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

        # Restrict the training split to this client's indices, if any.
        # (Bug fix: dataidxs was previously accepted but ignored.)
        if self.dataidxs is not None:
            x_train = x_train[self.dataidxs]
            y_train = y_train[self.dataidxs]
        return x_train, y_train, x_test, y_test

    def __getitem__(self, index):
        """Return ``(sample, target)`` at ``index`` from the training split.

        Bug fix: ``transform`` / ``target_transform`` were previously
        accepted in ``__init__`` but never applied here.
        """
        sample, target = self.train_data[index], self.train_targets[index]
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target

    def __len__(self):
        """Number of samples in the training split."""
        return len(self.train_data)


# 1. Load the data into memory (convertible to tensors downstream).
# 2. Under a non-IID setting, the data is later partitioned into multiple
#    shares; numpy arrays are returned (rather than a Dataset) because they
#    are easier to further partition and process.
# This manager only splits the data across clients; it does not decide the
# train/test role of each share, because several setups are possible:
# a. Separate training nodes and testing nodes.
# b. Each client holds both training and test data; a single client may
#    also need further internal splits.
class MaldroidDataManager(DataManager):
    """In-memory manager for the Maldroid dataset.

    Exposes the raw train/test splits as numpy arrays so callers can
    partition them across federated clients as they see fit.
    """

    def __init__(self, dir='data'):
        """Remember the dataset location via the ``DataManager`` base."""
        super().__init__(dir)

    def load_data(self):
        """Load and preprocess the dataset.

        Returns:
            Tuple ``(train_data, train_targets, test_data, test_targets)``.
        """
        ds = Maldroid(self.dir, train=True, transform=ToTensor(), download=True)
        return ds.train_data, ds.train_targets, ds.test_data, ds.test_targets

# Singleton-style loading: this statement runs at import time, so the global
# `dataManager` (and the dataset it reads from disk) is placed in memory as
# soon as the module is imported.
dataManager = MaldroidDataManager(dir='../data/maldroid2020/feature_vectors_syscallsbinders_frequency_5_Cat.csv')