from torch import LongTensor, stack, zeros
from torch import cat as torch_cat
from random import choice as random_choice
from random import randint
from torch.utils.data import Dataset
from pytorch_pretrained_bert import BertTokenizer


class TrainDataset(Dataset):
    """Training dataset pairing sentences with nodes via negative sampling.

    Each item yields the positive (sentence, node) pair plus
    ``config.negative_sample_size`` randomly drawn negative nodes, stacked
    into tensors, with a one-hot label vector marking the positive row.
    """

    def __init__(self, sentence_node_pair, nodes_set, config, part):
        """
        Args:
            sentence_node_pair: iterable of ``(sentence_text, node_index)``
                pairs, where ``node_index`` indexes into ``nodes_set``.
            nodes_set: sequence of node texts.
            config: must expose ``max_seq_len`` and ``negative_sample_size``.
            part: opaque tag passed through unchanged with every item/batch.
        """
        self.config = config
        self.part = part
        self.sentence_node_pair = sentence_node_pair
        self.nodes_set = nodes_set
        self.length = len(sentence_node_pair)
        self.nodes_num = len(nodes_set)
        self.max_seq_len = config.max_seq_len
        # Load the tokenizer exactly once per instance.  (Previously it was
        # ALSO loaded at class-definition time as a class attribute, paying
        # the model-load cost twice and at import time; TestDataset never
        # had that attribute, so the instance-level load is the convention.)
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        self.build_tensor()

    def __len__(self):
        return self.length

    def build_tensor(self):
        """Tokenize, pad/truncate to ``max_seq_len`` and id-convert all
        sentences and nodes, caching lengths for the downstream model."""
        self.lens_s, self.lens_n = [], []
        self.sentences_ids, self.nodes_ids = [], []
        # Reuse the `sequence` helper instead of duplicating its
        # pad/truncate logic inline.
        for sentence, index in self.sentence_node_pair:
            ids, lens = self.sequence(self.tokenizer, sentence, self.max_seq_len)
            self.lens_s.append(lens)
            self.sentences_ids.append([LongTensor(ids), index])

        for node in self.nodes_set:
            ids, lens = self.sequence(self.tokenizer, node, self.max_seq_len)
            self.lens_n.append(lens)
            self.nodes_ids.append(ids)

        self.nodes_ids = LongTensor(self.nodes_ids)

    def __getitem__(self, idx):
        """Return one positive pair plus its negative samples.

        Returns:
            ``(sentences, nodes, labels, lens_s, lens_n, part)`` where the
            three tensors have ``negative_sample_size + 1`` rows and
            ``labels`` is one-hot at the positive row.
        """
        positive_sample_sentence, index = self.sentences_ids[idx]
        positive_sample_node = self.nodes_ids[index]
        lens_s, lens_n = [], []

        sample_sentences = []
        sample_nodes = []

        # Rejection-sample negatives uniformly over all nodes; a candidate
        # counts only if its token ids differ from the positive node's, so
        # textual duplicates of the positive can never become negatives.
        while len(sample_nodes) < self.config.negative_sample_size:
            node_index = randint(0, self.nodes_num - 1)
            if (self.nodes_ids[node_index] != positive_sample_node).any():
                sample_sentences.append(positive_sample_sentence)
                sample_nodes.append(self.nodes_ids[node_index])
                lens_s.append(self.lens_s[idx])
                lens_n.append(self.lens_n[node_index])

        # Insert the positive pair at a random slot.  random.randint's upper
        # bound is inclusive, so every slot 0..negative_sample_size is valid.
        ppos = randint(0, self.config.negative_sample_size)
        lens_s.insert(ppos, self.lens_s[idx])
        lens_n.insert(ppos, self.lens_n[index])
        sample_sentences.insert(ppos, positive_sample_sentence)
        sample_nodes.insert(ppos, positive_sample_node)

        sample_sentences_tensor = stack(sample_sentences)
        sample_nodes_tensor = stack(sample_nodes)
        sample_labels = zeros(sample_nodes_tensor.shape[0]).long()
        sample_labels[ppos] = 1
        return sample_sentences_tensor, sample_nodes_tensor, sample_labels, lens_s, lens_n, self.part

    @staticmethod
    def sequence(tokenizer, s, max_seq_len):
        """Tokenize ``s``, pad/truncate to ``max_seq_len``, return
        ``(token_ids, effective_length)``."""
        tokens = tokenizer.tokenize(s)
        lens = min(len(tokens), max_seq_len)
        if len(tokens) < max_seq_len:
            tokens += ['[PAD]'] * (max_seq_len - len(tokens))
        tokens = tokens[: max_seq_len]
        return tokenizer.convert_tokens_to_ids(tokens), lens

    @staticmethod
    def collate_fn(data):
        """Concatenate per-item tensors along dim 0 and flatten the length
        lists; the shared ``part`` tag (tuple position 5) passes through."""
        sentences_tensor, nodes_tensor, labels_tensor, lens_s, lens_n = [], [], [], [], []
        for d in data:
            sentences_tensor.append(d[0])
            nodes_tensor.append(d[1])
            labels_tensor.append(d[2])
            lens_s += d[3]
            lens_n += d[4]
        return torch_cat(sentences_tensor), torch_cat(nodes_tensor), torch_cat(labels_tensor), lens_s, lens_n, data[0][5]


class TestDataset(Dataset):
    """Evaluation dataset: each item pairs one sentence with EVERY node.

    All ``nodes_num`` candidates are returned for each sentence, with a
    one-hot label vector at the sentence's ground-truth node index, so a
    model can be scored/ranked over the full candidate set.
    """

    def __init__(self, sentence_node_pair, nodes_set, config, part):
        """
        Args:
            sentence_node_pair: iterable of ``(sentence_text, node_index)``
                pairs, where ``node_index`` indexes into ``nodes_set``.
            nodes_set: sequence of node texts.
            config: must expose ``max_seq_len``.
            part: opaque tag passed through unchanged with every item/batch.
        """
        self.config = config
        self.part = part
        self.sentence_node_pair = sentence_node_pair
        self.nodes_set = nodes_set
        self.length = len(sentence_node_pair)
        self.nodes_num = len(nodes_set)
        self.max_seq_len = config.max_seq_len
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        self.build_tensor()

    def __len__(self):
        return self.length

    def build_tensor(self):
        """Tokenize, pad/truncate to ``max_seq_len`` and id-convert all
        sentences and nodes, caching lengths for the downstream model."""
        self.lens_s, self.lens_n = [], []
        self.sentences_ids, self.nodes_ids = [], []
        for sentence, index in self.sentence_node_pair:
            tokenized_sentence = self.tokenizer.tokenize(sentence)
            self.lens_s.append(min(len(tokenized_sentence), self.max_seq_len))
            if len(tokenized_sentence) < self.max_seq_len:
                tokenized_sentence += ["[PAD]"] * (self.max_seq_len - len(tokenized_sentence))
            tokenized_sentence = tokenized_sentence[: self.max_seq_len]
            self.sentences_ids.append([LongTensor(self.tokenizer.convert_tokens_to_ids(tokenized_sentence)), index])

        for node in self.nodes_set:
            tokenized_node = self.tokenizer.tokenize(node)
            self.lens_n.append(min(len(tokenized_node), self.max_seq_len))
            if len(tokenized_node) < self.max_seq_len:
                tokenized_node += ["[PAD]"] * (self.max_seq_len - len(tokenized_node))
            tokenized_node = tokenized_node[: self.max_seq_len]
            self.nodes_ids.append(self.tokenizer.convert_tokens_to_ids(tokenized_node))

        self.nodes_ids = LongTensor(self.nodes_ids)

    def __getitem__(self, idx):
        """Return the sentence at ``idx`` paired with every candidate node.

        Returns:
            ``(sentences, nodes, labels, lens_s, lens_n, part)``; all
            tensors have ``nodes_num`` rows and ``labels`` is one-hot at
            the ground-truth node index.
        """
        positive_sample_sentence, positive_index = self.sentences_ids[idx]
        lens_s, lens_n = [], []
        sample_sentences, sample_nodes = [], []

        # Distinct loop variable: the previous version reused `index`, which
        # shadowed the ground-truth index and made the label below always
        # land on the LAST node instead of the true one.
        for node_index in range(self.nodes_num):
            sample_sentences.append(positive_sample_sentence)
            sample_nodes.append(self.nodes_ids[node_index])

            lens_s.append(self.lens_s[idx])
            lens_n.append(self.lens_n[node_index])

        sample_sentences_tensor = stack(sample_sentences)
        sample_nodes_tensor = stack(sample_nodes)
        sample_labels = zeros(sample_nodes_tensor.shape[0]).long()
        sample_labels[positive_index] = 1

        return sample_sentences_tensor, sample_nodes_tensor, sample_labels, lens_s, lens_n, self.part

    @staticmethod
    def collate_fn(data):
        """Concatenate per-item tensors along dim 0 and flatten the length
        lists.  The shared ``part`` tag lives at tuple position 5 (position
        4 is ``lens_n``), matching TrainDataset.collate_fn — the previous
        version returned ``data[0][4]`` by mistake."""
        sentences_tensor, nodes_tensor, labels, lens_s, lens_n = [], [], [], [], []
        for d in data:
            sentences_tensor.append(d[0])
            nodes_tensor.append(d[1])
            labels.append(d[2])
            lens_s += d[3]
            lens_n += d[4]
        return torch_cat(sentences_tensor), torch_cat(nodes_tensor), torch_cat(labels), lens_s, lens_n, data[0][5]


class BidirectionOneShotIterator(object):
    """Endlessly alternate batches from two dataloaders.

    Both loaders are wrapped in infinite generators; odd steps yield from
    the entity loader, even steps from the relation loader, restarting
    each loader whenever it is exhausted.
    """

    def __init__(self, dataloader_entity, dataloader_relation):
        self.iterator_entity = self.one_shot_iterator(dataloader_entity)
        self.iterator_relation = self.one_shot_iterator(dataloader_relation)
        self.step = 0

    def __iter__(self):
        # Complete the iterator protocol so the object also works in
        # for-loops and iter() contexts, not only via explicit next().
        return self

    def __next__(self):
        self.step += 1
        # Odd steps -> entity batches, even steps -> relation batches.
        if self.step % 2:
            return next(self.iterator_entity)
        else:
            return next(self.iterator_relation)

    @staticmethod
    def one_shot_iterator(dataloader):
        """Yield from ``dataloader`` forever, restarting it at exhaustion."""
        while True:
            for data in dataloader:
                yield data