import copy
import shelve
import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from easySample import easySample
from dataPre import dataPre
import sys
sys.path.append(r"/home/cyw/projects/function_sim_project/basic_script")


# load_from_shelve=True
load_from_shelve = False
# Recommended to keep this off (reload data instead of using the shelve cache).
# Used to distinguish datasets (per-model cache naming).
modelName = "MGMN"


class MGMN_dataset(Dataset):
    """Graph-pair dataset for the MGMN model.

    Each item is ``[x, y, value]`` where ``x`` and ``y`` are
    ``[edge_index, node_attributes]`` for one graph — ``edge_index`` is a
    ``(2, E)`` COO index array derived from the dense adjacency matrix —
    and ``value`` is a 1-element tensor wrapping the pair's label.
    """

    def __init__(self, pairInf, name) -> None:
        """
        Args:
            pairInf: sequence of ``(x_key, y_key, value)`` triples.
            name: split name, e.g. "train" / "test" / "valid".
        """
        self.data = pairInf
        self.datasetName = name
        self.samples = {}  # reserved cache for preloaded samples (currently unused)
        self.eSample = easySample()

    def sampels_trans_to_batch(self, lth, att_dimension, batch, ind):
        """Pad one side of a batch (x when ``ind == 0``, y when ``ind == 1``)
        to uniform shapes.

        Every sample's edge-index array is zero-padded to the batch's
        largest edge count, and every attribute matrix to the largest
        node count.

        NOTE: the (misspelled) method name is kept for backward
        compatibility with existing callers.

        Args:
            lth: number of samples in ``batch``.
            att_dimension: width of one node-attribute row.
            batch: list of ``[x, y, value]`` items as produced by
                ``__getitem__``.
            ind: 0 to collate the x graphs, 1 for the y graphs.

        Returns:
            ``[edge_index_batch, attribute_batch]`` — a long tensor of
            shape ``(lth, 2, E_max)`` and a float32 tensor of shape
            ``(lth, N_max, att_dimension)``.
        """
        # The largest node / edge counts determine the padded shapes.
        max_node_size = 0  # was -inf; 0 is equivalent for non-empty batches
        edgeSize = 0
        for i in range(lth):
            max_node_size = max(max_node_size, len(batch[i][ind][1]))
            edgeSize = max(edgeSize, len(batch[i][ind][0][0]))

        batch_adj, batch_att = [], []
        for i in range(lth):
            temp_adj = np.zeros((2, edgeSize))
            temp_att = np.zeros((max_node_size, att_dimension))

            sample_size = len(batch[i][ind][1])  # node count of this sample

            temp_adj[:, :len(batch[i][ind][0][0])] = batch[i][ind][0]
            temp_att[:sample_size, :] = batch[i][ind][1]

            # temp_adj / temp_att are freshly allocated on every iteration,
            # so they can be appended directly (the previous deepcopy here
            # was redundant work).
            batch_adj.append(temp_adj)
            batch_att.append(temp_att)

        return [torch.FloatTensor(np.array(batch_adj)).long(),
                torch.tensor(np.array(batch_att), dtype=torch.float32)]

    def adjust_samples_to_same_dimension(self, batch):
        """Collate a list of dataset items into uniformly padded tensors.

        Returns:
            ``(values, x_side, y_side)`` — a float32 tensor of labels and
            the two ``[edge_index_batch, attribute_batch]`` pairs from
            ``sampels_trans_to_batch``.
        """
        lth = len(batch)
        att_dimension = len(batch[0][0][1][0])
        if att_dimension != 8:
            # Only warns (does not raise) to preserve historical behavior.
            print("error  MGMN 数据批处理维度错误")
        res_x = self.sampels_trans_to_batch(lth, att_dimension, batch, 0)
        res_y = self.sampels_trans_to_batch(lth, att_dimension, batch, 1)
        batch_value = [batch[i][2][0] for i in range(lth)]
        return (torch.tensor(batch_value, dtype=torch.float32), res_x, res_y)

    # The downstream model consumes sparse (COO) edge indices rather than
    # dense adjacency matrices, hence the conversion helper below.
    def _to_edge_index(self, adj):
        """Dense adjacency matrix -> ``(2, E)`` COO edge-index numpy array."""
        return torch.FloatTensor(adj).to_sparse().indices().numpy()

    def __getitem__(self, idx):
        """Return one pair as ``[x, y, value]`` (see class docstring)."""
        sample_x = self.eSample.get_sample(self.data[idx][0], modelName)
        x = [self._to_edge_index(sample_x["adj"]), sample_x["att"]]

        sample_y = self.eSample.get_sample(self.data[idx][1], modelName)
        y = [self._to_edge_index(sample_y["adj"]), sample_y["att"]]

        value = torch.tensor([self.data[idx][2]])
        return [x, y, value]

    def __len__(self):
        return len(self.data)


if __name__ == "__main__":
    # Preprocessing entry point: build one dataset per split so any cached
    # sample data gets (re)generated.
    datapre = dataPre()
    data_pair_infs = datapre.get_pair_infs()

    load_from_shelve = False  # force fresh preprocessing, ignore shelve cache
    # NOTE: the previous version bound each dataset to a name "DataLoader",
    # shadowing torch.utils.data.DataLoader imported above; the instances
    # were overwritten anyway, so we simply construct them per split.
    for split_name in ("test", "valid", "train"):
        MGMN_dataset(data_pair_infs[split_name], split_name)
    print("MGMN数据预处理完成")
