import numpy
import torch
from torch.nn.modules import padding
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import pickle, pandas as pd


def pad_matrix(matrix, padding_index=0):
    """Pad a batch of square 2-D tensors to a common (max_n, max_n) size.

    Each item in *matrix* is a square (n_i, n_i) CPU torch tensor (e.g. an
    intra-/inter-speaker mask).  Every item is padded on the bottom and on
    the right with *padding_index* so that all outputs share the shape of
    the largest item in the batch.

    Args:
        matrix: sized iterable of square 2-D torch tensors (a list or a
            pandas Series — truthiness is deliberately avoided below
            because a Series' truth value is ambiguous).
        padding_index: fill value for the padded cells (default 0).

    Returns:
        list of numpy arrays, each of shape (max_n, max_n); an empty list
        when the batch is empty.
    """
    if len(matrix) == 0:
        # Empty batch: max() over an empty iterable would raise ValueError.
        return []
    max_len = max(item.size(0) for item in matrix)
    batch_matrix = []
    for item in matrix:
        arr = item.numpy()
        pad = max_len - arr.shape[0]  # rows == cols for a square mask
        batch_matrix.append(
            numpy.pad(arr, ((0, pad), (0, pad)), 'constant',
                      constant_values=(padding_index, padding_index)))
    return batch_matrix


class DailyDialogRobertaCometDataset(Dataset):
    """DailyDialog emotion-cause dataset pairing pre-computed RoBERTa
    utterance features with COMET commonsense-knowledge (CSK) relation
    features.

    All features are loaded from pickle files under ./data/ for the
    requested split; nothing is computed on the fly.
    """

    def __init__(self, split):
        '''
        Load the pre-computed features for one split.

        split: partition name interpolated into the pickle filenames
            (presumably 'train' / 'valid' / 'test' — confirm against the
            files actually present in ./data/).
        '''
        # Utterance-level RoBERTa features, labels and masks.
        # NOTE(review): pickle.load assumes these are trusted, locally
        # generated artifacts; encoding='latin1' decodes legacy
        # Python-2-era pickles.
        self.target_context, self.token_ids, self.attention_mask, self.speakers, self.cause_labels, self.emotion_label, \
            self.relative_position, self.intra_mask, self.inter_mask, self.Ids = pickle.load(
            open('./data/dailydialog_features_roberta_ptm_' + split + '.pkl', 'rb'), encoding='latin1')

        # Event-centred CSK relation features.
        self.isAfter, self.HasSubEvent, self.isBefore, self.Causes, self.xReason \
            = pickle.load(open('./data/dailydialog_csk_event_' + split + '.pkl', 'rb'), encoding='latin1')

        # Social-interaction CSK relation features (x* / o* variants).
        self.xReact, self.xWant, self.xIntent, self.xEffect, self.oReact, self.oWant, self.oIntent, self.oEffect \
            = pickle.load(open('./data/dailydialog_csk_social_' + split + '.pkl', 'rb'), encoding='latin1')

        # Dialogue ids define both the iteration order and the dataset size.
        self.keys = [x for x in self.Ids]
        self.len = len(self.keys)

    def __getitem__(self, index):
        """Return one dialogue as a 22-element tuple.

        Index layout (collated shapes documented after collate_fn):
        0-9 CSK relation tensors (isBefore, isAfter, xWant, xReact,
        xEffect, xIntent, oWant, oReact, oEffect, oIntent), 10 speaker
        one-hot, 11 utterance-validity mask (all ones here; padded to 0
        later by collate_fn), 12 cause labels, 13 emotion labels,
        14 relative positions, 15/16 intra-/inter-speaker masks,
        17-21 raw (un-tensorised) fields passed through unchanged.

        NOTE(review): the last element is self.Ids[index], which equals
        vid only when Ids is a positionally-indexed sequence — verify
        against how the pickle stores Ids.
        """
        vid = self.keys[index]
        return torch.FloatTensor(self.isBefore[vid]), \
            torch.FloatTensor(self.isAfter[vid]), \
            torch.FloatTensor(self.xWant[vid]), \
            torch.FloatTensor(self.xReact[vid]), \
            torch.FloatTensor(self.xEffect[vid]), \
            torch.FloatTensor(self.xIntent[vid]), \
            torch.FloatTensor(self.oWant[vid]), \
            torch.FloatTensor(self.oReact[vid]), \
            torch.FloatTensor(self.oEffect[vid]), \
            torch.FloatTensor(self.oIntent[vid]), \
            torch.FloatTensor([[1, 0] if x == 'A' else [0, 1] for x in self.speakers[vid]]), \
            torch.FloatTensor([1] * len(self.cause_labels[vid])), \
            torch.LongTensor(self.cause_labels[vid]), \
            torch.LongTensor(self.emotion_label[vid]), \
            torch.LongTensor(self.relative_position[vid]), \
            torch.FloatTensor(self.intra_mask[vid]), \
            torch.FloatTensor(self.inter_mask[vid]), \
            self.attention_mask[vid], \
            self.token_ids[vid], \
            self.target_context[vid], \
            self.speakers[vid], \
            self.Ids[index]

    def __len__(self):
        # Number of dialogues in this split.
        return self.len

    def collate_fn(self, data):
        """Collate a list of __getitem__ tuples into a batch.

        Columns 0-9 are padded time-major by pad_sequence, 10-13
        batch-first, 14 batch-first with pad value 31 (the out-of-range
        relative position), 15-16 are square masks padded in 2-D via
        pad_matrix, and 17-21 are returned as raw pandas Series.
        """
        dat = pd.DataFrame(data)
        # Square (seq_len, seq_len) masks need 2-D padding, which
        # pad_sequence cannot provide.
        intra_mask = torch.FloatTensor(pad_matrix(dat[15]))
        inter_mask = torch.FloatTensor(pad_matrix(dat[16]))

        result = []
        for i in dat:
            if i < 10:
                # CSK feature tensors -> (max_len, batch_size, dim)
                result.append(pad_sequence(dat[i]))
            elif i < 14:
                # speaker one-hot / masks / labels -> (batch_size, max_len[, 2])
                result.append(pad_sequence(dat[i], True))
            elif i < 15:
                # relative positions: padded slots marked with 31
                result.append(pad_sequence(dat[i], True, padding_value=31))
            elif i < 16:
                result.append(intra_mask)
            elif i < 17:
                result.append(inter_mask)
            else:
                # Columns 17-21: passed through as raw per-dialogue Series.
                result.append(dat[i])

        return result

    # Batch layout produced by collate_fn (index: content, shape):
    #  0 isBefore          (max_len, batch_size, dim)
    #  1 isAfter           (max_len, batch_size, dim)
    #  2 xWant             (max_len, batch_size, dim)
    #  3 xReact            (max_len, batch_size, dim)
    #  4 xEffect           (max_len, batch_size, dim)
    #  5 xIntent           (max_len, batch_size, dim)
    #  6 oWant             (max_len, batch_size, dim)
    #  7 oReact            (max_len, batch_size, dim)
    #  8 oEffect           (max_len, batch_size, dim)
    #  9 oIntent           (max_len, batch_size, dim)
    # 10 speaker one-hot: [1,0] for speaker 'A', [0,1] otherwise
    #                     (batch_size, max_len, 2)
    # 11 utterance mask: 1 for real utterances, 0 for positions padded to
    #    the longest dialogue in the batch, e.g. [1,1,1,0,0,0,0]
    #                     (batch_size, max_len)
    # 12 cause labels, e.g. [0,0,0,1,0,0]   (batch_size, max_len)
    # 13 emotion labels                     (batch_size, max_len)
    # 14 relative position: 0 for the emotion utterance itself, 1 for the
    #    utterance before it, 2 for the one before that; padded slots
    #    after the emotion utterance are all 31   (batch_size, max_len)
    # 15 intra-speaker mask                 (batch_size, max_len, max_len)
    # 16 inter-speaker mask                 (batch_size, max_len, max_len)
    # 17 attention_mask   Series (batch_size): per-utterance attention masks
    # 18 token_ids        Series (batch_size)
    # 19 target_context   Series (batch_size)
    # 20 speakers         Series (batch_size)
    # 21 Ids              Series (batch_size)