# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from torch_geometric.data import Dataset, InMemoryDataset
import torch

from functools import lru_cache
from typing import List

import os
import sys
import pickle

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from wrapper import preprocess_item
from collator import collator

# Project root is five directory levels above this file; feature/data dirs hang off it.
_project_root = os.path.abspath(__file__)
for _ in range(5):
    _project_root = os.path.split(_project_root)[0]

feature_dir = os.path.join(_project_root, "features", "CSN_v2")  # pre-extracted graph features
data_dir = os.path.join(_project_root, "data", "CSN")            # raw CodeSearchNet jsonl files


class CodeSearchNetPYGDataset(Dataset):
    """CodeSearchNet graphs stored as sharded ``InMemoryDataset`` files.

    ``process()`` reads the raw feature list and writes shards of up to
    ``SHARD_SIZE`` collated graphs each (``data_<split>_<i>.pt``).
    ``__getitem__`` lazily loads the shard containing the requested index
    (with a small LRU cache of open shards) and runs ``preprocess_item``
    on the selected graph.
    """

    # Number of graphs stored per shard file. All index/shard arithmetic
    # below must use this single constant.
    SHARD_SIZE = 10000

    def __init__(self, data_size, max_node=128, seq_length=512, max_node_num=511, multi_hop_max_dist=5, spatial_pos_max=1024, num_classes=128, root=feature_dir, subdirectory=None, ground_truth=None, transform=None, pre_transform=None, pre_filter=None, dataset="train"):
        """
        Args:
            data_size: expected number of samples (refreshed from
                ``data_size.pkl`` / ``process()`` when available).
            dataset: split name ("train"/"test"/"valid"); selects the raw
                feature file and shard file names.
            subdirectory: optional sub-folder of ``root`` for the shards.
            ground_truth: jsonl file name; defaults to
                ``<dataset>_enhanced.jsonl`` under ``data_dir``.
            Remaining arguments are collator/model hyper-parameters stored
            for ``collate``.
        """
        self.dataset = dataset
        self.data_size = data_size
        self.max_node = max_node
        self.max_node_num = max_node_num
        self.multi_hop_max_dist = multi_hop_max_dist
        self.spatial_pos_max = spatial_pos_max
        self.seq_length = seq_length
        self.num_classes = num_classes
        self.subdirectory = subdirectory
        self.ground_truth = os.path.join(data_dir, f'{dataset}_enhanced.jsonl' if ground_truth is None else ground_truth)
        super().__init__(root, transform, pre_transform, pre_filter)
        self.read_data_size()

    @property
    def raw_file_names(self):
        """Single raw feature file produced upstream for this split."""
        return [f'{self.dataset}_features.pt']

    @property
    def processed_file_names(self):
        """One shard file per started block of SHARD_SIZE samples."""
        return [f'data_{self.dataset}_{i}.pt' for i in range((self.data_size - 1) // self.SHARD_SIZE + 1)]

    def download(self):
        # Raw features are generated by a separate pipeline; nothing to fetch.
        pass

    def process(self):
        """Filter/transform the raw feature list and write it out in shards."""
        if self.subdirectory is not None:
            save_dir = os.path.join(self.root, self.subdirectory)
            # makedirs+exist_ok: tolerate pre-existing dirs and missing parents
            # (os.mkdir would raise in both cases).
            os.makedirs(save_dir, exist_ok=True)
        if len(self.raw_paths) != 1:
            # Previously this path left data_list as None and crashed on
            # len(None); fail with an actionable message instead.
            raise RuntimeError(f"expected exactly one raw feature file, found {self.raw_paths}")
        data_list = torch.load(self.raw_paths[0])
        self.data_size = len(data_list)
        idx, processed_list = 0, []
        for data in data_list:
            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            processed_list.append(data if self.pre_transform is None else self.pre_transform(data))
            idx += 1
            if idx % self.SHARD_SIZE == 0:
                self.save_data(processed_list, idx // self.SHARD_SIZE - 1)
                processed_list = []
        # idx counts only the samples that survived pre_filter, so it is the
        # authoritative dataset size.
        self.data_size = idx
        self.update_data_size()
        if processed_list:
            # Flush the final, partially filled shard.
            self.save_data(processed_list, idx // self.SHARD_SIZE)

    def save_data(self, processed_list, file_no):
        """Collate one shard and write it to its numbered file."""
        data, slices = InMemoryDataset.collate(processed_list)
        if self.subdirectory is not None:
            torch.save((data, slices), self.subdirectory_paths[file_no])
        else:
            torch.save((data, slices), self.processed_paths[file_no])

    def read_data_size(self):
        """Load recorded sample counts from ``data_size.pkl``.

        Returns the mapping (empty before the first ``process()`` run —
        previously a missing file crashed ``__init__``). When this
        dataset's subdirectory has a recorded size, ``self.data_size`` is
        refreshed from it.

        NOTE: deliberately not lru_cache-d any more — caching on an
        instance method pinned the instance alive and made
        ``update_data_size`` merge into a stale snapshot of the file.
        """
        file_name = os.path.join(self.root, 'data_size.pkl')
        try:
            with open(file_name, 'rb') as pic:
                data = pickle.load(pic)
        except FileNotFoundError:
            return {}
        if self.subdirectory is not None and self.subdirectory in data:
            self.data_size = data[self.subdirectory]
        return data

    def update_data_size(self):
        """Persist this subdirectory's sample count into ``data_size.pkl``."""
        data = self.read_data_size()
        file_name = os.path.join(self.root, 'data_size.pkl')
        if self.subdirectory is not None:
            data[self.subdirectory] = self.data_size
            with open(file_name, 'wb') as pic:
                pickle.dump(data, pic)

    @property
    def subdirectory_paths(self) -> List[str]:
        """Shard file paths when shards live under ``self.subdirectory``."""
        return [os.path.join(self.root, self.subdirectory, f'data_{self.dataset}_{i}.pt')
                for i in range((self.data_size - 1) // self.SHARD_SIZE + 1)]

    def len(self):
        # PyG's abstract ``len`` hook: number of shard *files*. Intentionally
        # different from ``__len__`` (number of samples) in this codebase.
        return len(self.processed_file_names)

    @lru_cache(maxsize=16)
    def get(self, idx):
        """Load shard ``idx`` as an InMemoryDataset (LRU-cached).

        NOTE(review): lru_cache on a method keeps this dataset object alive
        for the cache's lifetime; accepted here as an intentional shard
        cache for a long-lived singleton dataset.
        """
        ds = InMemoryDataset(self.root, self.transform, self.pre_transform, self.pre_filter)
        shard_dir = self.processed_dir if self.subdirectory is None else os.path.join(self.root, self.subdirectory)
        ds.data, ds.slices = torch.load(os.path.join(shard_dir, f'data_{self.dataset}_{idx}.pt'))
        return ds

    def __getitem__(self, idx):
        """Return the pre-processed graph at global index ``idx``.

        Raises:
            TypeError: if ``idx`` is not an int.
            IndexError: if ``idx`` is outside ``[0, data_size)`` (was a
                misleading TypeError before, which also broke the implicit
                iteration protocol; negative indices previously resolved to
                a nonexistent shard path).
        """
        if not isinstance(idx, int):
            raise TypeError("index to a Graphormer PYGDataset can only be an integer.")
        if not 0 <= idx < self.data_size:
            raise IndexError(f"index {idx} out of range for dataset of size {self.data_size}")
        ds_index, index = divmod(idx, self.SHARD_SIZE)
        item = self.get(ds_index)[index]
        item.idx = idx
        return preprocess_item(item)

    def __len__(self):
        # Total number of samples (see ``len`` for the shard-file count).
        return self.data_size

    def collate(self, batch):
        """Batch pre-processed items with the Graphormer collator."""
        return collator(batch, max_node=self.max_node, seq_length=self.seq_length, max_node_num=self.max_node_num, multi_hop_max_dist=self.multi_hop_max_dist, spatial_pos_max=self.spatial_pos_max, num_classes=self.num_classes)


if __name__ == "__main__":
    # Build and shard each CodeSearchNet split with its known sample count.
    for split_name, split_size in (("train", 164814), ("test", 10952), ("valid", 5179)):
        dataset = CodeSearchNetPYGDataset(data_size=split_size, dataset=split_name)
        dataset.process()

