import math
import os.path
import random

import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch.utils.data.dataset import T_co


def pad(array, length, constant_values=0):
    """Return *array* as a numpy array of exactly *length* entries.

    Shorter inputs are right-padded with *constant_values*; longer inputs
    keep only their most recent (trailing) *length* entries.
    """
    n = len(array)
    if n >= length:
        # Truncate from the front: the tail holds the newest items.
        return np.array(array[n - length:])
    fill = length - n
    return np.pad(array, (0, fill), constant_values=constant_values)


class SampleDataset(Dataset):
    """A fixed-size random-subset view over another dataset.

    Picks `k` distinct source indices once at construction time (without
    replacement) and serves the underlying items through them.
    """

    def __init__(self, dataset, k):
        self.k = k
        self.dataset = dataset
        # Draw k distinct positions; raises ValueError if k > len(dataset),
        # matching random.sample's contract.
        population = range(len(dataset))
        self.index_map = random.sample(population, k)

    def __getitem__(self, index) -> T_co:
        source_index = self.index_map[index]
        return self.dataset[source_index]

    def __len__(self):
        return len(self.index_map)


class EvalDataset(Dataset):
    """Evaluation dataset: pairs each test interaction with the user's
    padded item-history sequence, built from (up to) `window_size`
    timestamps strictly before the interaction, each down-sampled to at
    most `timestamp_sample` items.
    """

    def __init__(self, pre_data, test: pd.DataFrame,
                 window_size, timestamp_sample, item_num):
        # window_size: number of past timestamps kept per sample.
        self.window_size = window_size
        # Fixed output length: window_size timestamps x timestamp_sample items.
        self.pad_len = window_size * timestamp_sample
        # item_num doubles as the padding token id (presumably one past the
        # largest real item id — TODO confirm against the model's embedding size).
        self.pad_value = item_num
        self.timestamp_sample = timestamp_sample
        self.test = test
        # {userID: {timestamp: np.array of itemIDs}} built from the history data.
        self.process_data = self.pre_process_data(pre_data)
        # Remove test rows that would have an empty history window.
        self.screen_out_test()

    def screen_out_test(self):
        """Filter `self.test` to rows whose user has history and whose
        timestamp is strictly later than that user's earliest history
        timestamp (guarantees a non-empty window in __getitem__)."""
        users_min_timestamp = {user: min(timestamps.keys()) for user, timestamps in self.process_data.items()}

        screen_out = self.test[self.test.userID.isin(users_min_timestamp.keys())]

        # Per-user filter; x.userID.iloc[0] is safe because each group holds
        # exactly one userID.
        screen_out = screen_out.groupby('userID') \
            .apply(lambda x: x[x.timestamp > users_min_timestamp[x.userID.iloc[0]]])
        # Flatten the (userID, row) MultiIndex the groupby-apply produced so
        # __getitem__ can use positional .loc[idx].
        screen_out.reset_index(drop=True, inplace=True)
        self.test = screen_out

    @staticmethod
    def pre_process_data(data_frame: pd.DataFrame):
        """Group interactions as {userID: {timestamp: np.array of itemIDs}}."""
        process_data = {}
        users = data_frame.userID.unique()
        users = sorted(users)
        for user in users:
            select_user = data_frame[data_frame.userID == user]
            time_sort = select_user.sort_values("timestamp")
            group = time_sort.groupby("timestamp")
            user_data = {timestamp: np.array(data['itemID']) for timestamp, data in group}
            process_data[user] = user_data
        return process_data

    def sample_item(self, item):
        """Down-sample one timestamp's item array to at most
        `timestamp_sample` entries, without replacement (order randomized);
        shorter arrays are returned unchanged."""
        data_len = len(item)
        if self.timestamp_sample > data_len:
            return item
        sample_idx = random.sample(range(data_len), self.timestamp_sample)
        sample_item = item[sample_idx]
        return sample_item

    def sample_time(self, timestamp):
        """Sample each timestamp in the window, concatenate, and pad to
        `pad_len`. Returns (padded sequence, length before padding)."""
        sample_item = [self.sample_item(item) for item in timestamp]
        item_seq = np.concatenate(sample_item, axis=0)
        seq_len = len(item_seq)
        item_seq = pad(item_seq, self.pad_len, self.pad_value)
        return item_seq, seq_len

    def choose_timestamp(self, timestamps: dict, timestamp):
        """Return item arrays for the (up to) `window_size` most recent
        timestamps strictly earlier than `timestamp`."""
        # NOTE(review): int32 overflows for epoch-second timestamps past 2038;
        # assumes small timestamp/slice indices — confirm against the data.
        times = np.array(list(timestamps.keys()), dtype=np.int32)
        times.sort()
        times = times[times < timestamp]
        if len(times) >= self.window_size:
            times = times[-self.window_size:]
        return [timestamps[t] for t in times]

    def __getitem__(self, idx):
        # Unpack assumes column order (userID, itemID, timestamp, rating) —
        # verify against the test DataFrame's schema.
        user, item, timestamp, rating = self.test.loc[idx]
        user = int(user)
        item = int(item)
        timestamp = int(timestamp)
        window = self.choose_timestamp(self.process_data[user], timestamp)
        item_seq, seq_len = self.sample_time(window)
        return item_seq, seq_len, item, rating

    def __len__(self):
        return self.test.shape[0]


class QoSDataset(Dataset):
    """Training dataset for sequence-based QoS prediction with two
    contrastive augmentations (crop / mask / reorder) per sample."""

    def __init__(self, train, window, sample, predict_ratio, item_num):
        # sample: max items drawn from each timestamp; window: history length
        # in timestamps; pad_value doubles as padding token id.
        self.sample = sample
        self.window = window
        self.pad_len = sample * window
        self.pad_value = item_num

        self.process_data = self.pre_process_data(train)
        # Build the flat index map; each entry is
        # (userID, history slice (start, end), predicted timestamp index,
        #  position of the target item within that timestamp).
        self.data_map = []
        for user, data in self.process_data.items():
            user_map = [(user, timestamp_seq, predict_timestamp)
                        for timestamp_seq, predict_timestamp in self.generate_window(window, len(data))]
            for u, timestamp_seq, predict_timestamp in user_map:
                # Only the first ceil(predict_ratio * n) items of each
                # predicted timestamp become training targets.
                predict_length = len(self.process_data[u][predict_timestamp][0])
                predict_length = math.ceil(predict_ratio * predict_length)
                fine_grained = [(u, timestamp_seq, predict_timestamp, i) for i in range(predict_length)]
                self.data_map.extend(fine_grained)

    @staticmethod
    def generate_window(window, length):
        """Yield ((start, end), predict): a half-open slice of history
        timestamp indices plus the index of the timestamp to predict.

        Warm-up prefixes shorter than `window` come first, then the full
        sliding window; each index in 1..length-1 is predicted exactly once.
        """
        if window < length:
            for sub_length in range(1, window + 1):
                yield (0, sub_length), sub_length
            for start in range(1, length - window):
                yield (start, start + window), start + window
        else:
            for sub_length in range(1, length):
                yield (0, sub_length), sub_length

    @staticmethod
    def pre_process_data(data_frame: pd.DataFrame):
        """Group interactions as {userID: [(itemIDs, ratings), ...]} with one
        (items, ratings) array pair per timestamp, in ascending timestamp order."""
        process_data = {}
        users = data_frame.userID.unique()
        users = sorted(users)
        for user in users:
            select_user = data_frame[data_frame.userID == user]
            time_sort = select_user.sort_values("timestamp")
            group = time_sort.groupby("timestamp")
            user_data = [(np.array(data['itemID']), np.array(data['rating'])) for timestamp, data in group]
            process_data[user] = user_data
        return process_data

    def sample_timestamp(self, timestamp):
        """Down-sample one (items, ratings) timestamp pair to at most
        `sample` item ids, without replacement (ratings are discarded)."""
        item, _ = timestamp
        data_len = len(item)
        if self.sample > data_len:
            return item
        sample_idx = random.sample(range(data_len), self.sample)
        sample_item = item[sample_idx]
        return sample_item

    def __getitem__(self, index) -> T_co:
        # Returns (item_seq, seq_len, aug1, len1, aug2, len2,
        #          target itemID, target rating as float32).
        user, cut, predict, idx = self.data_map[index]
        start, end = cut

        # Build the padded history sequence from the sliced timestamps.
        timestamp = self.process_data[user][start: end]
        sample_item = list(map(self.sample_timestamp, timestamp))

        item_seq = np.concatenate(sample_item, axis=0)
        seq_len = len(item_seq)
        item_seq = pad(item_seq, self.pad_len, self.pad_value)

        # Target: the idx-th item/rating pair of the predicted timestamp.
        predict_timestamp = self.process_data[user][predict]
        predict_item, predict_rating = predict_timestamp
        predict_item = predict_item[idx]
        predict_rating = predict_rating[idx]
        predict_rating = predict_rating.astype(dtype=np.float32)

        # Two independent contrastive views of the same history sequence.
        aug_seq1, aug_seq1_len, aug_seq2, aug_seq2_len = self.data_augment(item_seq)

        return item_seq, seq_len, aug_seq1, aug_seq1_len, aug_seq2, aug_seq2_len, predict_item, predict_rating

    def __len__(self) -> int:
        return len(self.data_map)

    def data_augment(self, seq):
        """Produce two augmented views of `seq` using two DISTINCT operators
        chosen at random from {0: crop, 1: mask, 2: reorder}."""
        aug1 = None
        aug2 = None
        len1 = None
        len2 = None
        # sample without replacement -> the two views never use the same op.
        switch = random.sample(range(3), k=2)
        if switch[0] == 0:
            aug1, len1 = self.item_crop(seq, self.pad_len)
        elif switch[0] == 1:
            aug1, len1 = self.item_mask(seq, self.pad_len)
        elif switch[0] == 2:
            aug1, len1 = self.item_random(seq, self.pad_len)

        if switch[1] == 0:
            aug2, len2 = self.item_crop(seq, self.pad_len)
        elif switch[1] == 1:
            aug2, len2 = self.item_mask(seq, self.pad_len)
        elif switch[1] == 2:
            aug2, len2 = self.item_random(seq, self.pad_len)

        return aug1, len1, aug2, len2

    def item_crop(self, seq, pad_len):
        """Keep a random contiguous 60% slice of the un-padded sequence,
        then re-pad. Returns (augmented sequence, crop length)."""
        # Strip padding tokens first; np.delete returns a fresh array.
        seq = np.delete(seq, np.where(seq == self.pad_value))
        eta = 0.6
        length = len(seq)
        if length > 1:
            crop_length = math.floor(length * eta)
            begin = random.randint(0, length - crop_length)
            end = begin + crop_length
            aug = seq[begin:end]
            aug = pad(aug, pad_len, self.pad_value)
        elif length == 1:
            aug = pad(seq, pad_len, self.pad_value)
            crop_length = length
        else:
            # Empty sequence: report length 1 rather than 0
            # (presumably to avoid zero-length sequences downstream — confirm).
            aug = pad(seq, pad_len, self.pad_value)
            crop_length = 1
        return aug, crop_length

    def item_mask(self, seq, pad_len):
        """Replace a random 60% of the un-padded items with the pad token,
        then re-pad. Returns (augmented sequence, un-padded length)."""
        # np.delete returns a copy, so the in-place mask below is safe.
        seq = np.delete(seq, np.where(seq == self.pad_value))
        beta = 0.6
        length = len(seq)
        mask_length = math.floor(length * beta)
        mask_index = random.sample(range(length), mask_length)
        seq[mask_index] = self.pad_value
        seq = pad(seq, pad_len, self.pad_value)
        return seq, length

    def item_random(self, seq, pad_len):
        """Shuffle a random contiguous 60% sub-span of the un-padded
        sequence in place, then re-pad. Returns (augmented sequence, length)."""
        seq = np.delete(seq, np.where(seq == self.pad_value))
        gamma = 0.6
        length = len(seq)
        random_length = math.floor(length * gamma)
        begin = random.randint(0, length - random_length)
        end = begin + random_length
        # sub_shuffle is a view into seq; random.shuffle mutates it in place.
        sub_shuffle = seq[begin:end]
        random.shuffle(sub_shuffle)
        aug = np.concatenate([seq[:begin], sub_shuffle, seq[end:]])
        aug = pad(aug, pad_len, self.pad_value)
        return aug, length

    def shuffle(self):
        """Re-shuffle the item order inside every timestamp, keeping each
        (item, rating) pair aligned via a shared permutation."""
        for timestamp in self.process_data.values():
            for i in range(len(timestamp)):
                item, rating = timestamp[i]
                idx = np.arange(len(item))
                np.random.shuffle(idx)
                new_item, new_rating = item[idx], rating[idx]
                timestamp[i] = (new_item, new_rating)


if __name__ == '__main__':
    # Smoke-load fold 1 of the train/validation CSV splits.
    data_dir = '../newdata'
    train_csv = os.path.join(data_dir, 'QoStrain', f'train{1}.csv')
    val_csv = os.path.join(data_dir, 'QoSval', f'val{1}.csv')
    df = pd.read_csv(train_csv)
    val_df = pd.read_csv(val_csv)
