import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import os, random

from utils.mine_utils import random_index

class MKGCRankData(Dataset):
    def __init__(self, context, knowledge, response, tokenizer, rank_mode=1, context_len=256, response_len=128,
                 neg_num=4, pad_none=True):
        """Dataset for ranking knowledge candidates against a dialogue context.

        Args:
            context: dialogue contexts; each item is a list of utterances whose
                first element is the topic string.
            knowledge: knowledge pools; the first ``rank_mode`` entries of each
                pool are positives, the remainder negatives.
            response: responses (unused by this dataset; kept for interface symmetry).
            tokenizer: tokenizer whose ``encode`` follows the ``<cls> xx </s>``
                convention (XLM-R style) — TODO confirm against the caller.
            rank_mode: number of positive samples in the knowledge pool.
            context_len: maximum token length of the encoded context.
            response_len: maximum token length of each encoded knowledge piece.
            neg_num: number of negative samples kept in the pool.
            pad_none: set True for training — shuffles the negatives and pads the
                pool with '<none>' placeholders up to ``neg_num``.
        """
        # Bugfix: was super(Dataset, self).__init__(), which starts the MRO
        # lookup *after* Dataset; use the standard zero-argument form
        # (consistent with MKGCData in this file).
        super().__init__()
        self.context = context
        self.knowledge = knowledge
        self.response = response
        self.tokenizer = tokenizer
        self.rank_mode = rank_mode
        self.context_len = context_len
        self.response_len = response_len
        self.neg_num = neg_num
        self.pad_none = pad_none
        print(f"RankData, context_len {context_len}, response_len {response_len}, neg_num {neg_num}, pad_none {pad_none}")

    def __getitem__(self, index):
        """Return (history ids tensor, list of knowledge ids tensors) for one sample."""
        context = self.context[index]
        knowledge = self.knowledge[index]

        topic = context[0]
        topic_len = len(self.tokenizer.encode(topic, add_special_tokens=False))
        his = self.tokenizer.encode(topic + ' ' + ' '.join(context[1:]))[:-1]  # default: <cls> xx </s>, skip </s>
        # If too long, keep "<cls> topic" plus the most recent history tokens.
        his = his[:topic_len + 1] + his[-(self.context_len - topic_len - 1):] if len(his) > self.context_len else his

        neg = knowledge[self.rank_mode:]  # slice copy: shuffling does not touch self.knowledge
        if self.pad_none:  # training: shuffle negatives and pad the pool to neg_num
            random.shuffle(neg)
            neg = neg + ['<none>'] * (self.neg_num - len(neg))
        neg = neg[:self.neg_num]
        knowledge = knowledge[:self.rank_mode] + neg
        know = []
        for k in knowledge:
            k = torch.tensor(self.tokenizer.encode(  # default: <cls> xx </s>
                ' <knowledge> ' + k, truncation=True, max_length=self.response_len))
            know.append(k)
        return torch.tensor(his), know

    def __len__(self):
        return len(self.context)

    @staticmethod
    def collate_fn(data):
        """Pad/batch samples; flatten per-sample knowledge lists, then reshape
        back to [batch_size, pool_size, knowledge_len] (pool size is uniform)."""
        context, knowledge = zip(*data)
        knowledge = sum(knowledge, [])  # flatten list-of-lists of tensors
        context = pad_sequence(context, batch_first=True, padding_value=1)
        knowledge = pad_sequence(knowledge, batch_first=True, padding_value=1)
        b_s, _ = context.size()
        _, k_l = knowledge.size()
        knowledge = knowledge.reshape(b_s, -1, k_l)
        return {
            'context': context,  # [batch_size, context_len]
            'knowledge': knowledge  # [batch_size, pool_size, knowledge_len]
        }


class MKGCGenerateData(Dataset):
    def __init__(self, context, knowledge, response, tokenizer, generate_mode=1, context_len=256, response_len=128, lang_code=250004,
                 con_num=3, neg_num=7, pad_none=True, shuffle=True):
        """Dataset for knowledge-grounded response generation.

        Args:
            context: dialogue contexts; each item is a list of utterances whose
                first element is the topic string.
            knowledge: knowledge pools; the first ``generate_mode`` entries of
                each pool are positives, the remainder negatives.
            response: gold responses, one per context.
            tokenizer: tokenizer whose ``encode`` follows the
                ``xx </s> lang_token`` convention (mBART style) — TODO confirm.
            generate_mode: number of positive samples in the knowledge pool.
            lang_code: language id token of the dialogue's language.
            con_num: number of knowledge pieces concatenated before the context.
            neg_num: number of negative samples in the knowledge pool.
            pad_none: set True for training — shuffles the negatives and pads the
                pool with '<none>' placeholders up to ``neg_num``.
            shuffle: if True (and pad_none), occasionally swap positive and
                negative positions to harden the model against ranking errors.
        """
        # Bugfix: was super(Dataset, self).__init__(), which starts the MRO
        # lookup *after* Dataset; use the standard zero-argument form
        # (consistent with MKGCData in this file).
        super().__init__()
        self.context = context
        self.knowledge = knowledge
        self.response = response
        self.tokenizer = tokenizer
        self.generate_mode = generate_mode
        self.context_len = context_len
        self.response_len = response_len
        self.lang_code = lang_code
        self.con_num = con_num
        self.neg_num = neg_num
        self.pad_none = pad_none
        self.shuffle = shuffle
        print(f"MKGCGenerateData, context_len {context_len}, response_len {response_len}, lang_code {lang_code}, "
              f"con_num {con_num}, neg_num {neg_num}, pad_none {pad_none}, shuffle {shuffle}")

    def __getitem__(self, index):
        """Return (knowledge+context input ids, decoder response ids) tensors."""
        context = self.context[index]
        response = self.response[index]
        # Bugfix: work on a copy of the pool. The pos/neg swap below used to
        # mutate self.knowledge[index] in place, so positives drifted away from
        # the head of the pool across epochs.
        knowledge = list(self.knowledge[index])

        topic = context[0]
        topic_len = len(self.tokenizer.encode(topic, add_special_tokens=False))
        his = self.tokenizer.encode(topic + ' ' + ' '.join(context[1:]))[:-2]  # default: xx </s> lang_token, skip </s> and lang_token
        # If too long, keep the topic plus the most recent history tokens.
        his = his[:topic_len] + his[-(self.context_len - topic_len):] if len(his) > self.context_len else his

        neg = knowledge[self.generate_mode:]
        if self.pad_none:  # training: shuffle negatives and pad the pool to neg_num
            random.shuffle(neg)
            if self.shuffle:
                # With some probability swap positive/negative positions so the
                # model becomes robust to imperfect knowledge ranking.
                # NOTE(review): `neg` was sliced *before* these swaps, so a
                # swapped-out positive disappears from the pool and the
                # swapped-in negative appears twice — confirm this is intended.
                for m in range(self.generate_mode):
                    i = min(random_index(k=self.con_num, pool_size=self.neg_num) + m, len(knowledge) - 1)
                    knowledge[i], knowledge[m] = knowledge[m], knowledge[i]
            neg = neg + ['<none>'] * (self.neg_num - len(neg))
        neg = neg[:self.neg_num]
        knowledge = knowledge[:self.generate_mode] + neg

        # Move the language token from the tail to the head (decoder-input style).
        response = [self.lang_code] + self.tokenizer.encode(response, truncation=True, max_length=self.response_len)[:-1]

        con_knowledge = []
        for k in knowledge[:self.con_num]:
            con_knowledge += self.tokenizer.encode("<knowledge> " + k)[:-1]  # skip lang_token
        con_context = con_knowledge[:self.con_num * self.response_len] + his
        return torch.tensor(con_context), torch.tensor(response)

    def __len__(self):
        return len(self.context)

    @staticmethod
    def collate_fn(data):
        """Pad/batch samples; masks are 1 where the token is not the pad id (1)."""
        context, response = zip(*data)
        context = pad_sequence(context, batch_first=True, padding_value=1)
        response = pad_sequence(response, batch_first=True, padding_value=1)
        return {
            'context': context,  # [batch_size, con_num * response_len + context_len]
            'context_mask': context.ne(1).long().detach(),
            'response': response,  # [batch_size, response_len]
            'response_mask': response.ne(1).long().detach()
        }


class MKGCData(Dataset):
    def __init__(self, context, knowledge, response, rank_tokenizer, gen_tokenizer, mode=1, context_len=256,
                 response_len=128, lang_code=250004, neg_num=7, pad_none=True):
        """Joint dataset that feeds both the ranker and the generator.

        Args:
            context: dialogue contexts; each item is a list of utterances whose
                first element is the topic string.
            knowledge: knowledge pools; the first ``mode`` entries of each pool
                are positives, the remainder negatives.
            response: gold responses, one per context.
            rank_tokenizer: tokenizer following the ``<cls> xx </s>`` convention.
            gen_tokenizer: tokenizer following the ``xx </s> lang_token`` convention.
            mode: number of positive samples in the knowledge pool.
            lang_code: language id token of the dialogue's language.
            neg_num: number of negative samples in the knowledge pool.
            pad_none: set True for training — shuffles the negatives and pads the
                pool with '<none>' placeholders up to ``neg_num``.
        """
        super().__init__()
        self.context = context
        self.knowledge = knowledge
        self.response = response

        self.rank_tokenizer = rank_tokenizer
        self.gen_tokenizer = gen_tokenizer

        self.mode = mode
        self.context_len = context_len
        self.response_len = response_len
        self.lang_code = lang_code
        self.neg_num = neg_num
        self.pad_none = pad_none
        print(f"MKGCData, context_len {context_len}, response_len {response_len}, lang_code {lang_code}, "
              f"neg_num {neg_num}, pad_none {pad_none}")

    def __getitem__(self, index):
        """Return (rank history, rank knowledge list, gen history, gen knowledge list, response)."""
        context = self.context[index]
        response = self.response[index]
        knowledge = self.knowledge[index]

        topic = context[0]
        full_text = topic + ' ' + ' '.join(context[1:])

        # Ranker history: <cls> xx </s> convention — drop the trailing </s>.
        r_topic_len = len(self.rank_tokenizer.encode(topic, add_special_tokens=False))
        rank_his = self.rank_tokenizer.encode(full_text)[:-1]
        if len(rank_his) > self.context_len:
            # keep "<cls> topic" plus the most recent history tokens
            rank_his = rank_his[:r_topic_len + 1] + rank_his[-(self.context_len - r_topic_len - 1):]

        # Generator history: xx </s> lang_token convention — drop both tails.
        g_topic_len = len(self.gen_tokenizer.encode(topic, add_special_tokens=False))
        gen_his = self.gen_tokenizer.encode(full_text)[:-2]
        if len(gen_his) > self.context_len:
            gen_his = gen_his[:g_topic_len] + gen_his[-(self.context_len - g_topic_len):]

        negatives = knowledge[self.mode:]  # slice copy: shuffling never touches self.knowledge
        if self.pad_none:  # training: shuffle negatives and pad the pool to neg_num
            random.shuffle(negatives)
            negatives += ['<none>'] * (self.neg_num - len(negatives))
        pool = knowledge[:self.mode] + negatives[:self.neg_num]

        # Move the language token from the tail to the head (decoder-input style).
        resp_ids = self.gen_tokenizer.encode(response, truncation=True, max_length=self.response_len)[:-1]
        resp_ids = [self.lang_code] + resp_ids

        rank_know, gen_know = [], []
        for piece in pool:
            tagged = ' <knowledge> ' + piece
            rank_know.append(torch.tensor(
                self.rank_tokenizer.encode(tagged, truncation=True, max_length=self.response_len)))
            gen_know.append(torch.tensor(  # drop the trailing lang_token
                self.gen_tokenizer.encode(tagged, truncation=True, max_length=self.response_len + 1)[:-1]))
        return torch.tensor(rank_his), rank_know, torch.tensor(gen_his), gen_know, torch.tensor(resp_ids)

    def __len__(self):
        return len(self.context)

    @staticmethod
    def collate_fn(data):
        """Pad and batch; the flattened knowledge tensors are reshaped back to
        [batch_size, pool_size, knowledge_len] (pool size is uniform)."""
        rank_his, rank_know, gen_his, gen_know, response = zip(*data)

        def pad(seqs):
            return pad_sequence(seqs, batch_first=True, padding_value=1)

        flat_rank = pad([t for sample in rank_know for t in sample])
        flat_gen = pad([t for sample in gen_know for t in sample])
        gen_his = pad(gen_his)
        batch = gen_his.size(0)
        return {
            'rank_context': pad(rank_his),  # [batch_size, context_len]
            'rank_knowledge': flat_rank.reshape(batch, -1, flat_rank.size(1)),  # [batch_size, pool_size, knowledge_len]
            'generate_context': gen_his,  # [batch_size, context_len]
            'generate_knowledge': flat_gen.reshape(batch, -1, flat_gen.size(1)),  # [batch_size, pool_size, knowledge_len]
            'response': pad(response)  # [batch_size, response_len]
        }

