from numpy.lib.arraysetops import isin
from torchvision import datasets
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import torch
import pickle
import os
import numpy as np

from util.utils import *

class BNNTDATA(Dataset):
    """
    Binary-neural-network-tree dataset preprocessing.

    Splits a torchvision-style dataset into per-label groups and serves
    DataLoaders over arbitrary contiguous label ranges. The (slow) split is
    cached on disk with pickle so it only ever runs once per dataset.
    """
    # Path templates (class-level, read-only). Per-instance copies are built
    # in __init__; the original code mutated these shared lists with
    # insert(), corrupting the paths for every instance after the first.
    cached_train_file_list = ["data", "bnntree", "train_cached.pk"]
    cached_test_file_list = ["data", "bnntree", "test_cached.pk"]

    def __init__(self, batch_size, bnnt_datasets=None, cache=True, transform=None):
        """
        Args:
            batch_size: batch size for every DataLoader this object builds.
            bnnt_datasets: torchvision dataset class (defaults to
                datasets.MNIST). Its ``.data`` may hold torch.Tensor or
                numpy.ndarray samples.
            cache: when True, reuse / write the on-disk pickle cache.
            transform: per-sample transform (defaults to transforms.ToTensor()).
        """
        self.batch_size = batch_size
        self.cache = cache
        # Resolve defaults lazily: avoids building torchvision objects at
        # class-definition time and avoids a shared default transform instance.
        self.transform = transform if transform is not None else transforms.ToTensor()
        self.bnnt_datasets = bnnt_datasets if bnnt_datasets is not None else datasets.MNIST
        # Backward-compatible alias for the original misspelled attribute.
        self.tranform = self.transform

        # Build cache paths per instance instead of insert()-ing into the
        # shared class-level lists (bug fix: repeated construction used to
        # keep growing the shared path).
        name = self.bnnt_datasets.__name__
        self.cached_train_file_list = ["data", "bnntree", name, "train_cached.pk"]
        self.cached_test_file_list = ["data", "bnntree", name, "test_cached.pk"]
        self.cached_train_file = os.path.join(*self.cached_train_file_list)
        self.cached_test_file = os.path.join(*self.cached_test_file_list)
        self.parse_data()

    def _load_cache(self, path, tag):
        """Best-effort read of one pickle cache file; returns {} when unavailable."""
        # NOTE: pickle.load is only safe because these files are produced
        # locally by this class — never point the cache at untrusted data.
        try:
            with open(path, "rb") as f:
                return pickle.load(f)
        except (OSError, pickle.UnpicklingError, EOFError):
            print(tag + " data cache is not found. " + path)
            return {}

    def parse_data(self):
        """Populate train_dict / test_dict, from cache when possible, else by splitting."""
        self.train_dict = {}
        self.test_dict = {}
        if self.cache:
            self.train_dict = self._load_cache(self.cached_train_file, "train")
            self.test_dict = self._load_cache(self.cached_test_file, "test")
        print(self.cached_train_file_list[2] + " data loading...")
        if not self.train_dict:
            train_data = self.bnnt_datasets(root="data", train=True, transform=self.transform, download=True)
            self.train_dict = self.split2dic(train_data)
            mkdirs(self.cached_train_file)
            with open(self.cached_train_file, "wb") as f:
                pickle.dump(self.train_dict, f)

        if not self.test_dict:
            # NOTE(review): test data deliberately(?) ignores self.transform
            # and always uses ToTensor(), mirroring get_test_dataloader —
            # confirm whether custom transforms should apply here too.
            test_data = self.bnnt_datasets(root="data", train=False, transform=transforms.ToTensor(), download=True)
            self.test_dict = self.split2dic(test_data)
            mkdirs(self.cached_test_file)
            with open(self.cached_test_file, "wb") as f:
                pickle.dump(self.test_dict, f)
        print("Done")

    def split2dic(self, data):
        """Group samples by label.

        Args:
            data: object exposing ``.data`` (indexable samples) and
                ``.targets`` (labels, ints or 0-d tensors).
        Returns:
            dict mapping label -> samples stacked along a new leading axis.
        """
        unsqueeze, cat, _, _ = self.type_select(data.data[0])
        buckets = {}
        size_ = len(data.targets)
        for index, y in enumerate(data.targets):
            if isinstance(y, torch.Tensor):
                y = y.item()
            # Collect per-label lists and concatenate once at the end;
            # cat-ing inside the loop (original code) was O(n^2).
            buckets.setdefault(y, []).append(unsqueeze(data.data[index], 0))
            print(f"\rdata parsing progress: [{index}/{size_}]", end="")
        return {label: cat(tuple(parts), 0) for label, parts in buckets.items()}

    def get_test_dataloader(self):
        """DataLoader over the raw (unsplit) test set."""
        test_data = self.bnnt_datasets(root="data", train=False, transform=transforms.ToTensor(), download=True)
        return DataLoader(test_data, batch_size=self.batch_size)

    def get_subdata(self, lb, rb, istrain=True):
        """
        DataLoader over the labels lb, lb+1, ... , rb-1, rb (inclusive range).

        Args:
            lb, rb: inclusive label bounds.
            istrain: select the train split (shuffled) or the test split.
        """
        data_dict = self.train_dict if istrain else self.test_dict
        _, cat, ones, dtype = self.type_select(data_dict[lb][0])

        d = None
        labels = None
        for i in range(lb, rb + 1):
            if d is None:
                d = data_dict[i]
                labels = ones(len(d), dtype=dtype) * i
            else:
                d = cat((d, data_dict[i]), 0)
                label = ones(len(data_dict[i]), dtype=dtype) * i
                labels = cat((labels, label), 0)
        assert len(d) == len(labels), "数据长度和标签长度不等"
        return self.get_dataloader(d, labels, istrain)

    def get_dataloader(self, dataset, labels, istrain):
        """Wrap (samples, labels) into a DataLoader; shuffle only for training."""
        dataset = BNNTDATASET(dataset, labels)
        return DataLoader(dataset, batch_size=self.batch_size, shuffle=istrain)

    def type_select(self, data):
        """Return the (unsqueeze, cat, ones, int-dtype) toolkit matching data's type.

        Raises:
            TypeError: if data is neither a torch.Tensor nor a numpy.ndarray
                (the original code died with UnboundLocalError here).
        """
        if isinstance(data, torch.Tensor):
            return torch.unsqueeze, torch.cat, torch.ones, torch.long
        if isinstance(data, np.ndarray):
            return np.expand_dims, np.concatenate, np.ones, np.int64
        raise TypeError(f"unsupported sample type: {type(data)!r}")
        

class BNNTDATASET:
    """In-memory dataset pairing pre-split samples with their labels.

    Implements ``__len__`` / ``__getitem__`` so a DataLoader can consume it
    directly; each sample is passed through a ToTensor transform on access.
    """

    def __init__(self, data, label):
        super().__init__()
        self.data = data
        self.labels = label
        # Applied lazily per item in __getitem__.
        self.transform = transforms.ToTensor()

    def __len__(self):
        """Number of samples held."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the (transformed sample, label) pair at ``index``."""
        item = self.data[index]
        target = self.labels[index]
        if self.transform is None:
            return item, target
        # ToTensor cannot consume a torch.Tensor directly, so tensors are
        # converted to numpy before the transform is applied.
        if isinstance(item, torch.Tensor):
            item = item.detach().numpy()
        return self.transform(item), target