import torch
from tqdm import tqdm
from torch_geometric.data import Data, InMemoryDataset
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Load the raw sentence data; the CSV's own header row is replaced with
# canonical column names used throughout this module.
COLUMN_NAMES = ["session_id", "word_id", "label", "category"]
df = pd.read_csv("data/sentences_data.csv")
df.columns = COLUMN_NAMES

class EnglishBinaryDataset(InMemoryDataset):
    """One graph per sentence session.

    Each session in the module-level ``df`` becomes one ``Data`` object:
    nodes are the session's distinct words (feature = global ``word_id``),
    directed edges connect consecutive words in reading order, and ``y``
    is the session's binary label.
    """

    def __init__(self, root, transform=None, pre_transform=None):
        # NOTE: super().__init__ triggers process() when the processed file
        # is missing, so process() must not depend on attributes set below.
        super().__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
        # Kept for downstream inspection; process() itself reads the
        # module-level df (see note there).
        self.df = df

    @property
    def raw_file_names(self):
        # Everything comes from the module-level CSV; no raw files to track.
        return []

    def download(self):
        # Data is local — nothing to download.
        pass

    @property
    def processed_file_names(self):
        return ["english_binary_1M_sess_dataset"]

    def process(self):
        """Build and save one ``Data`` graph per session.

        NOTE(review): reads the module-level ``df`` rather than ``self.df``
        because the base class may call process() before ``__init__``
        finishes assigning instance attributes.
        """
        data_list = []
        for session_id, group in tqdm(df.groupby("session_id")):
            group = group.reset_index(drop=True)
            # Map the session's word_ids to a dense local node-id range
            # 0..n_unique-1 (LabelEncoder assigns ids in sorted word_id order).
            group["word_local_index"] = LabelEncoder().fit_transform(group.word_id)

            # One global word_id per local node, ordered by local index.
            # (The original filtered on session_id here, but inside a
            # groupby("session_id") loop that filter is always all-True.)
            node_features = (
                group.sort_values("word_local_index")
                .word_id.drop_duplicates()
                .values
            )
            x = torch.LongTensor(node_features).unsqueeze(1)

            # Directed edge from each word to its successor in the sentence.
            source_nodes = group.word_local_index.values[:-1]
            target_nodes = group.word_local_index.values[1:]
            edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)

            # Label is constant within a session; take the first row's value.
            y = torch.FloatTensor([group.label.values[0]])
            data_list.append(Data(x=x, edge_index=edge_index, y=y))

        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])


# Constructing the dataset triggers process() automatically whenever the
# processed file is absent; the previous explicit my_dataset.process() call
# re-built and re-saved the dataset on every run, defeating
# InMemoryDataset's on-disk caching.
my_dataset = EnglishBinaryDataset(root="data")