import torch
import torch.nn as nn
import torch.utils.data as Data
import pickle
import random


# DataSet
class BertDataset(Data.Dataset):
    """BERT-style training dataset for sequential recommendation.

    Each sample draws a random user and returns that user's item sequence
    with masking applied: either random positions are replaced by the mask
    token (``total_items + 1``), or only the last real item is masked.

    Returns per sample:
        batchx  -- masked input item sequence, shape [seq_len]
        batchy  -- unmasked target item sequence, shape [seq_len]
        batcht  -- the user's timestamp sequence, shape [seq_len]
        mask    -- bool tensor, True at masked positions
        padding -- bool tensor, True at padded (empty) tail positions
        cur_cnt -- number of real items for this user (0-dim long tensor)
        user_id -- the sampled user index
    """

    def __init__(self, hyper_params, train_items, train_time, cur_cnt):
        """Store config plus the [total_users, seq_len] item/time tensors
        and the per-user sequence-length tensor `cur_cnt`."""
        self.hyper_params = hyper_params
        self.train_items: torch.Tensor = train_items
        self.train_time: torch.Tensor = train_time
        self.cur_cnt: torch.Tensor = cur_cnt

    def __getitem__(self, index):
        # BUG FIX: the user index must be drawn from the user range, not the
        # item range (the original used 'total_items', which mismatches
        # __len__ and indexes out of bounds when items != users).
        user_id = random.randint(0, self.hyper_params['total_users'] - 1)

        seq_len = self.hyper_params['seq_len']
        mask_token = self.hyper_params['total_items'] + 1

        cur_cnt = self.cur_cnt[user_id]
        n = int(cur_cnt)
        batchy = self.train_items[user_id]
        batcht = self.train_time[user_id]
        # Padding: False over the n real positions, True over the empty tail.
        # BUG FIX: the original random-mask branch passed dtype=torch.bool to
        # torch.cat, which raises TypeError; cast with .bool() instead.
        padding = torch.cat(
            [torch.zeros(n), torch.ones(seq_len - n)]).bool()

        # NOTE(review): with probability 'mask_last' we do *random* masking
        # and otherwise mask the last item — the name suggests the inverse;
        # branch order kept as in the original, confirm intent upstream.
        if random.random() < self.hyper_params['mask_last']:
            # Random masking: replace ~mask_rate of positions with the token.
            mask = (torch.rand(seq_len) < self.hyper_params['mask_rate'])
            # masked_fill is out-of-place, so train_items stays intact.
            batchx = torch.masked_fill(batchy, mask, mask_token)
        else:
            # Mask only the last real item.
            # BUG FIX: clone before the in-place write — the original wrote
            # the mask token straight into the shared train_items storage,
            # corrupting both the dataset and the label sequence batchy.
            batchx = self.train_items[user_id].clone()
            batchx[n - 1] = mask_token
            mask = torch.zeros(seq_len, dtype=torch.bool)
            mask[n - 1] = 1

        return batchx, batchy, batcht, mask, padding, cur_cnt, user_id

    def __len__(self):
        # One epoch = one random draw per user.
        return self.hyper_params['total_users']


class DatasetLoader():
    """Builds train/val/test tensors for sequential recommendation.

    Parses ``./datasets/<dataset_path>.txt`` rows of
    (user, item, rating, timestamp), filters by rating and per-user
    interaction count, splits according to ``split_type``
    ('time_split' | 'loo' | 'ratio'), and caches the resulting tensors
    under ``./model_dat/`` so later runs skip the text parsing.
    Discovered vocabulary sizes are written back into ``hyper_params``.
    """

    def __init__(self, hyper_params):
        self.hyper_params = hyper_params

        # Per-split item/time tensors, each [total_users, seq_len] (long).
        self.train_items = None
        self.test_items = None
        self.val_items = None
        self.train_time = None
        self.test_time = None
        self.val_time = None
        # Per-user sequence lengths, each [total_users] (long).
        self.train_cnt = None
        self.val_cnt = None
        self.test_cnt = None
        # Global timestamp range, used for time normalization.
        self.min_time = None
        self.max_time = None

        self.total_users = 0
        self.total_items = 0
        # One of: time_split, loo, ratio
        self.split_type = hyper_params['split_type']
        # One of: normal, bert, time_interval, tcn
        self.dataset_type = hyper_params['dataset_type']

        self.load_data()
        # Publish the discovered vocabulary sizes back to the config.
        hyper_params['total_users'] = self.total_users
        hyper_params['total_items'] = self.total_items

        print(f'Users:{self.total_users}, Items:{self.total_items}')

    def load_data(self):
        """Load the cached tensor bundle; fall back to parsing the raw file."""
        try:
            (self.train_items, self.val_items, self.test_items,
             self.train_time, self.val_time, self.test_time,
             self.train_cnt, self.val_cnt, self.test_cnt,
             self.total_users, self.total_items,
             self.min_time, self.max_time) = torch.load(
                f'./model_dat/{self.hyper_params["dataset_path"]}_{self.split_type}_{self.dataset_type}_{self.hyper_params["seq_len"]}.pkl')
        except Exception as err:
            # Cache miss (or stale/corrupt cache): rebuild from the text file.
            print(err)
            self.load_from_file()

    def load_from_file(self):
        """Parse the raw interaction file, build all split tensors, cache them."""
        print('Loading from file...')

        temp_users = {}
        temp_time = {}
        self.min_time, self.max_time = 1e20, 0

        # Read (user, item, rating, timestamp) rows, dropping low ratings.
        # BUG FIX: file handle is now closed via a context manager.
        with open(f'./datasets/{self.hyper_params["dataset_path"]}.txt', 'rt') as f:
            for line in f:
                user, item, rate, t = line.strip().split(
                    self.hyper_params['splitter'])

                if float(rate) < self.hyper_params['min_rate']:
                    continue

                self.min_time = min(int(t), self.min_time)
                self.max_time = max(int(t), self.max_time)

                if temp_users.get(user) is None:
                    temp_users[user] = []
                    temp_time[user] = []

                temp_users[user].append(item)
                temp_time[user].append(int(t))

        cur_user, cur_item = 0, 0
        item_dict = {}  # raw item id -> dense 1-based item index
        print('Load from file finished. Processing...')

        # Per-split python lists; converted into tensors below.
        train_item_matrix, train_time_matrix = [], []
        val_item_matrix, val_time_matrix = [], []
        test_item_matrix, test_time_matrix = [], []

        f_o = None
        if self.hyper_params['output_file']:
            f_o = open(
                f'./datasets/{self.hyper_params["dataset_path"]}-o.txt', 'wt')

        for (k, v) in temp_users.items():
            # Drop users with too few interactions.
            if len(v) < self.hyper_params['min_items']:
                continue

            # Sort this user's items by timestamp; keep only the most
            # recent seq_len interactions.
            item_list = sorted(
                ([item, temp_time[k][i]] for i, item in enumerate(v)),
                key=lambda x: x[1])
            if len(item_list) > self.hyper_params['seq_len']:
                item_list = item_list[-self.hyper_params['seq_len']:]

            if self.split_type == 'time_split':
                # Train on interactions before the split timestamp, test on
                # the first one after it; skip users entirely on one side.
                if item_list[0][1] >= self.hyper_params['time_split'] or item_list[-1][1] < self.hyper_params['time_split']:
                    continue
                cur_user += 1

                for i, item in enumerate(v):
                    if item_dict.get(item) is None:
                        cur_item += 1
                        item_dict[item] = cur_item

                for i in range(len(item_list)):
                    if item_list[i][1] >= self.hyper_params['time_split']:
                        train_item_matrix.append(
                            [item_dict[j[0]] for j in item_list[:i]])
                        train_time_matrix.append([j[1] for j in item_list[:i]])

                        test_item_matrix.append(item_dict[item_list[i][0]])
                        test_time_matrix.append(item_list[i][1])
                        break
                # NOTE(review): time_split is unfinished upstream (no val
                # split; scalar test entries would break the tensor loop
                # below), so it still aborts unconditionally.
                raise NotImplementedError()
            elif self.split_type == 'loo':
                # Leave-one-out: last item held out for test, second-to-last
                # for validation; val/test sequences include their history.
                cur_user += 1
                for i, item in enumerate(v):
                    if item_dict.get(item) is None:
                        cur_item += 1
                        item_dict[item] = cur_item

                train_item_matrix.append([item_dict[i[0]]
                                          for i in item_list[:-2]])
                train_time_matrix.append([i[1] for i in item_list[:-2]])
                val_item_matrix.append([item_dict[i[0]]
                                        for i in item_list[:-1]])
                val_time_matrix.append([i[1] for i in item_list[:-1]])
                test_item_matrix.append([item_dict[i[0]]
                                         for i in item_list])
                test_time_matrix.append([i[1] for i in item_list])
            elif self.split_type == 'ratio':
                # First train_ratio of each sequence trains; val/test get
                # the full sequence.
                cur_user += 1
                for i, item in enumerate(v):
                    if item_dict.get(item) is None:
                        cur_item += 1
                        item_dict[item] = cur_item

                train_cnt = int(len(item_list) *
                                self.hyper_params['train_ratio'])

                train_item_matrix.append([item_dict[i[0]]
                                          for i in item_list[:train_cnt]])
                train_time_matrix.append([i[1] for i in item_list[:train_cnt]])
                val_item_matrix.append([item_dict[i[0]]
                                        for i in item_list])
                val_time_matrix.append([i[1] for i in item_list])
                test_item_matrix.append([item_dict[i[0]]
                                         for i in item_list])
                test_time_matrix.append([i[1] for i in item_list])

                if self.hyper_params['output_file']:
                    for it in item_list:
                        f_o.write(f'{cur_user} {item_dict[it[0]]}\n')
            else:
                print(f'Illegal split type: {self.split_type}. Please check!')
                raise NotImplementedError()

        # BUG FIX: the optional output file was never closed.
        if f_o is not None:
            f_o.close()

        self.total_users = cur_user
        self.total_items = cur_item

        print('Loading into tensors...')

        train_item = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)
        train_time = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)

        val_item = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)
        val_time = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)

        test_item = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)
        test_time = torch.zeros(
            [self.total_users, self.hyper_params['seq_len']], dtype=torch.long)
        train_cnt = torch.zeros([self.total_users], dtype=torch.long)
        val_cnt = torch.zeros([self.total_users], dtype=torch.long)
        test_cnt = torch.zeros([self.total_users], dtype=torch.long)

        # Left-align each user's sequence; zeros pad the tail.
        for i in range(self.total_users):
            train_len = len(train_item_matrix[i])
            val_len = len(val_item_matrix[i])
            test_len = len(test_item_matrix[i])

            train_item[i, :train_len] = torch.LongTensor(train_item_matrix[i])
            train_time[i, :train_len] = torch.LongTensor(train_time_matrix[i])

            val_item[i, :val_len] = torch.LongTensor(val_item_matrix[i])
            val_time[i, :val_len] = torch.LongTensor(val_time_matrix[i])

            test_item[i, :test_len] = torch.LongTensor(test_item_matrix[i])
            test_time[i, :test_len] = torch.LongTensor(test_time_matrix[i])

            train_cnt[i] = train_len
            val_cnt[i] = val_len
            test_cnt[i] = test_len

        self.train_items = train_item
        self.test_items = test_item
        self.val_items = val_item
        self.train_time = train_time
        self.test_time = test_time
        self.val_time = val_time
        self.train_cnt = train_cnt
        self.test_cnt = test_cnt
        self.val_cnt = val_cnt

        print('Finished')
        # Cache everything so load_data() can skip parsing next time.
        torch.save((self.train_items, self.val_items, self.test_items, self.train_time, self.val_time, self.test_time, self.train_cnt, self.val_cnt, self.test_cnt, self.total_users, self.total_items, self.min_time, self.max_time),
                   f'./model_dat/{self.hyper_params["dataset_path"]}_{self.split_type}_{self.dataset_type}_{self.hyper_params["seq_len"]}.pkl')

    def _empty_batch(self):
        """Allocate the zeroed per-user batch tensors shared by every
        dataset type: (batchx, batchy, batcht, cur_cnt, padding, user_id)."""
        seq_len = self.hyper_params['seq_len']
        batchx = torch.zeros([self.total_users, seq_len], dtype=torch.long)
        batchy = torch.zeros([self.total_users, seq_len], dtype=torch.long)
        batcht = torch.zeros([self.total_users, seq_len], dtype=torch.float)
        cur_cnt = torch.zeros([self.total_users], dtype=torch.long)
        padding = torch.zeros([self.total_users, seq_len]).bool()
        user_id = torch.arange(0, self.total_users)
        return batchx, batchy, batcht, cur_cnt, padding, user_id

    def generate_train_data(self):
        """Build the training dataset for the configured dataset_type.

        Raises NotImplementedError for unknown dataset types.
        """
        if self.dataset_type == 'bert':
            # BUG FIX: the original referenced the module-global
            # `hyper_params` (NameError outside __main__) and a nonexistent
            # `self.cur_cnt` attribute instead of `self.train_cnt`.
            return BertDataset(self.hyper_params, self.train_items,
                               self.train_time, self.train_cnt)

        batchx, batchy, batcht, cur_cnt, padding, user_id = self._empty_batch()
        seq_len = self.hyper_params['seq_len']

        if self.dataset_type == 'normal':
            # Left-aligned next-item prediction; times min-max normalized.
            for i in range(self.total_users):
                cnt = self.train_cnt[i]
                batchx[i, :cnt-1] = self.train_items[i, :cnt - 1]
                batchy[i, :cnt-1] = self.train_items[i, 1:cnt]
                batcht[i, :cnt-1] = (self.train_time[i, 1:cnt] -
                                     self.min_time).float()/(self.max_time - self.min_time)
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.zeros(cnt-1), torch.ones(
                    seq_len - cnt + 1)]).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        elif self.dataset_type == 'time_interval':
            # Right-aligned sequences; times bucketed into time_span bins.
            for i in range(self.total_users):
                cnt = self.train_cnt[i]
                batchx[i, -cnt+1:] = self.train_items[i, :cnt - 1]
                batchy[i, -cnt+1:] = self.train_items[i, 1:cnt]
                batcht[i, -cnt+1:] = ((self.train_time[i, 1:cnt] - self.min_time).float() / (
                    self.max_time-self.min_time) * self.hyper_params['time_span']).long()
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.ones(
                    seq_len - cnt + 1), torch.zeros(cnt - 1)], dim=-1).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        elif self.dataset_type == 'tcn':
            # Right-aligned sequences with raw timestamps.
            for i in range(self.total_users):
                cnt = self.train_cnt[i]
                batchx[i, -cnt+1:] = self.train_items[i, :cnt - 1]
                batchy[i, -cnt+1:] = self.train_items[i, 1:cnt]
                batcht[i, -cnt+1:] = self.train_time[i, 1:cnt]
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.ones(
                    seq_len - cnt + 1), torch.zeros(cnt - 1)], dim=-1).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        else:
            print(f'Illegal dataset type: {self.dataset_type}. Please check!')
            raise NotImplementedError()

    def generate_test_data(self):
        """Build the test dataset for the configured dataset_type.

        Raises NotImplementedError for 'bert' and unknown dataset types.
        """
        batchx, batchy, batcht, cur_cnt, padding, user_id = self._empty_batch()
        seq_len = self.hyper_params['seq_len']

        if self.dataset_type == 'bert':
            raise NotImplementedError()
        elif self.dataset_type == 'normal':
            # Left-aligned; NOTE: unlike training, test times stay raw here.
            for i in range(self.total_users):
                cnt = self.test_cnt[i]
                batchx[i, :cnt-1] = self.test_items[i, :cnt - 1]
                batchy[i, :cnt-1] = self.test_items[i, 1:cnt]
                batcht[i, :cnt-1] = self.test_time[i, 1:cnt]
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.zeros(cnt-1), torch.ones(
                    seq_len - cnt + 1)]).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        elif self.dataset_type == 'time_interval':
            # Right-aligned; times bucketed into time_span bins.
            for i in range(self.total_users):
                cnt = self.test_cnt[i]
                batchx[i, -cnt+1:] = self.test_items[i, :cnt - 1]
                batchy[i, -cnt+1:] = self.test_items[i, 1:cnt]
                batcht[i, -cnt+1:] = ((self.test_time[i, 1:cnt] - self.min_time).float() / (
                    self.max_time-self.min_time) * self.hyper_params['time_span']).long()
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.ones(
                    seq_len - cnt + 1), torch.zeros(cnt - 1)], dim=-1).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        elif self.dataset_type == 'tcn' and self.split_type == 'ratio':
            # Inputs are the right-aligned train_ratio prefix; targets are
            # the left-aligned remainder of the sequence.
            for i in range(self.total_users):
                cnt = self.test_cnt[i]
                input_cnt = int(self.hyper_params['train_ratio'] * cnt)
                batchx[i, -input_cnt:] = self.test_items[i, :input_cnt]
                batchy[i, :cnt-input_cnt] = self.test_items[i, input_cnt:cnt]
                batcht[i, :cnt] = self.test_time[i, :cnt]
                cur_cnt[i] = input_cnt

                padding[i] = torch.cat([torch.ones(
                    seq_len - input_cnt), torch.zeros(input_cnt)], dim=-1).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        else:
            print(f'Illegal dataset type: {self.dataset_type}. Please check!')
            raise NotImplementedError()

    def generate_val_data(self):
        """Build the validation dataset for the configured dataset_type.

        Raises NotImplementedError for 'bert', 'tcn' and unknown types.
        """
        batchx, batchy, batcht, cur_cnt, padding, user_id = self._empty_batch()
        seq_len = self.hyper_params['seq_len']

        if self.dataset_type == 'bert':
            raise NotImplementedError()
        elif self.dataset_type == 'normal':
            # Left-aligned; NOTE: unlike training, val times stay raw here.
            for i in range(self.total_users):
                cnt = self.val_cnt[i]
                batchx[i, :cnt-1] = self.val_items[i, :cnt - 1]
                batchy[i, :cnt-1] = self.val_items[i, 1:cnt]
                batcht[i, :cnt-1] = self.val_time[i, 1:cnt]
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.zeros(cnt-1), torch.ones(
                    seq_len - cnt + 1)]).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        elif self.dataset_type == 'time_interval':
            # Right-aligned; times bucketed into time_span bins.
            for i in range(self.total_users):
                cnt = self.val_cnt[i]
                batchx[i, -cnt+1:] = self.val_items[i, :cnt - 1]
                batchy[i, -cnt+1:] = self.val_items[i, 1:cnt]
                batcht[i, -cnt+1:] = ((self.val_time[i, 1:cnt] - self.min_time).float() / (
                    self.max_time-self.min_time) * self.hyper_params['time_span']).long()
                cur_cnt[i] = cnt - 1

                padding[i] = torch.cat([torch.ones(
                    seq_len - cnt + 1), torch.zeros(cnt - 1)], dim=-1).bool()

            return Data.TensorDataset(batchx, batchy, batcht, padding, cur_cnt, user_id)
        else:
            print(f'Illegal dataset type: {self.dataset_type}. Please check!')
            raise NotImplementedError()


if __name__ == '__main__':
    # Smoke test: build the ml-1m loader with a leave-one-out split and
    # materialize the test set (left-aligned 'normal' layout).
    hyper_params = {
        'dataset_path': 'ml-1m',
        'seq_len': 200,
        'time_split': -0.5,
        'splitter': '::',
        'min_rate': 3.5,
        'min_items': 5,
        'split_type': 'loo',
        'dataset_type': 'normal',
        # BUG FIX: load_from_file() reads 'output_file' unconditionally, so
        # omitting it raised KeyError whenever the tensor cache was missing.
        'output_file': False,
    }
    loader = DatasetLoader(hyper_params)
    test_dataset = loader.generate_test_data()
