# -*- coding:utf-8 -*-
import random
import torch
from torch.utils.data import Dataset, DataLoader
from utils.vocab import Vocabulary, load_vocab


def padding(words, sent_maxlen, pad_idx=0):
    """Pad a token-id list to ``sent_maxlen`` and truncate any overflow.

    Args:
        words: list of token ids.
        sent_maxlen: target length of the returned list.
        pad_idx: id used for padding (default 0, matching the previous
            hard-coded behavior; parameterized so vocabularies with a
            different PAD id can reuse this helper).

    Returns:
        A list of exactly ``sent_maxlen`` ids.
    """
    if len(words) < sent_maxlen:
        words = words + [pad_idx] * (sent_maxlen - len(words))
    # Truncation also covers inputs longer than sent_maxlen.
    return words[:sent_maxlen]


class PairwiseDataset(Dataset):
    """Dataset of (query, positive, negative) id triples.

    ``samples`` must contain an even number of (label, query, candidate)
    rows where row ``2i`` is the higher-labelled (positive) member and row
    ``2i + 1`` the lower-labelled (negative) member for the same query.
    """

    def __init__(self, samples: list, vocab: Vocabulary, sent1_maxlen: int, sent2_maxlen: int):
        assert len(samples) % 2 == 0
        self._data = []
        self.vocab = vocab
        self.sent1_maxlen = sent1_maxlen
        self.sent2_maxlen = sent2_maxlen

        # Walk the rows two at a time: even index = positive, odd = negative.
        for pos_row, neg_row in zip(samples[0::2], samples[1::2]):
            pos_label, pos_query, pos_cand = pos_row
            neg_label, neg_query, neg_cand = neg_row

            # Pair invariants: positive outranks negative, same query text.
            assert pos_label > neg_label
            assert pos_query == neg_query

            query_ids = padding(vocab.convert2idx(pos_query.split()), sent1_maxlen)
            pos_ids = padding(vocab.convert2idx(pos_cand.split()), sent2_maxlen)
            neg_ids = padding(vocab.convert2idx(neg_cand.split()), sent2_maxlen)

            self._data.append((query_ids, pos_ids, neg_ids))

    def __getitem__(self, index):
        """Return (query, positive, negative) as 1-D LongTensors."""
        query_ids, pos_ids, neg_ids = self._data[index]
        return (torch.LongTensor(query_ids),
                torch.LongTensor(pos_ids),
                torch.LongTensor(neg_ids))

    def __len__(self):
        return len(self._data)


class PointwiseDataset(Dataset):
    """Dataset of single (label, query, candidate) rows.

    Sentences are converted to padded id sequences at construction time;
    ``__getitem__`` returns (query_tensor, candidate_tensor, label).
    """

    def __init__(self, samples: list, vocab: Vocabulary, sent1_maxlen: int, sent2_maxlen: int):
        self._data = [
            (label,
             padding(vocab.convert2idx(query.split()), sent1_maxlen),
             padding(vocab.convert2idx(candidate.split()), sent2_maxlen))
            for label, query, candidate in samples
        ]

    def __getitem__(self, index):
        """Return (query LongTensor, candidate LongTensor, label)."""
        label, query_ids, cand_ids = self._data[index]
        return torch.LongTensor(query_ids), torch.LongTensor(cand_ids), label

    def __len__(self):
        return len(self._data)


class PointwiseDataLoader(DataLoader):
    """DataLoader that additionally carries per-query group sizes.

    ``groups()`` returns the list passed at construction — the number of
    consecutive candidates per query in the underlying dataset.
    """

    def __init__(self, dataset, batch_size, shuffle, groups):
        # Assigned before super().__init__, preserving the original order
        # (DataLoader restricts re-setting of some attributes after init).
        self._groups = groups
        super().__init__(dataset, batch_size=batch_size, shuffle=shuffle)

    def groups(self):
        """Return the per-query candidate counts."""
        return self._groups


class GANDataset(Dataset):
    """Dataset for adversarial (GAN-style) ranking training.

    ``samples`` maps a query string to ``{'pos': [...], 'neg': [...]}``
    candidate-sentence lists.  Each item pairs the query with one randomly
    drawn positive and the full fixed-size negative pool; the query and the
    positive are repeated so all three tensors have shape
    ``(pool_size + 1, sent_maxlen)`` and the positive appears as the last
    row of the negative-pool tensor.

    Queries whose negative pool is not exactly ``pool_size`` long are
    dropped with a console notice; queries with no positives are dropped
    silently (they could never be sampled).
    """

    def __init__(self, samples: dict, vocab: Vocabulary, sent1_maxlen: int, sent2_maxlen: int, pool_size: int):
        self._data = []
        for query in samples:
            q_ids = self._pad(vocab.convert2idx(query.split()), sent1_maxlen)

            pos_sent_ids = [self._pad(vocab.convert2idx(s.split()), sent2_maxlen)
                            for s in samples[query]['pos']]
            neg_sent_ids = [self._pad(vocab.convert2idx(s.split()), sent2_maxlen)
                            for s in samples[query]['neg']]

            if len(neg_sent_ids) != pool_size:
                print('ignore query({}): {}'.format(len(neg_sent_ids), query))
                continue
            if not pos_sent_ids:
                # A query with no positive candidate would crash the random
                # draw in __getitem__; drop it up front.
                continue

            self._data.append((q_ids, pos_sent_ids, neg_sent_ids))

    @staticmethod
    def _pad(ids, maxlen):
        """Pad with 0 up to ``maxlen`` then truncate (same behavior as the
        module-level ``padding`` helper; kept local so the class is
        self-contained)."""
        return (ids + [0] * (maxlen - len(ids)))[:maxlen]

    def __getitem__(self, index):
        """Return (query, pos, neg) LongTensors, each (pool_size+1, maxlen)."""
        q_ids, pos_sent_ids, neg_sent_ids = self._data[index]
        pos_sent_id = random.choice(pos_sent_ids)

        rows = len(neg_sent_ids) + 1
        query_tensor = torch.LongTensor([q_ids] * rows)
        pos_tensor = torch.LongTensor([pos_sent_id] * rows)
        # BUG FIX: the original did ``neg_sent_ids += [pos_sent_id]``, which
        # mutated the list stored in self._data — the negative pool grew by
        # one row on every access, breaking tensor shapes across epochs.
        # Build a fresh list instead of appending in place.
        neg_tensor = torch.LongTensor(neg_sent_ids + [pos_sent_id])
        return query_tensor, pos_tensor, neg_tensor

    def __len__(self):
        return len(self._data)
        pass


def load_pointwise_raw_data(data_path):
    """Read ``label<TAB>query<TAB>candidate`` rows from *data_path*.

    Returns:
        samples: list of (int label, query, candidate) tuples, in file order.
        groups: run lengths of consecutive rows sharing the same query
            (an empty file yields ``[0]``).
    """
    samples = []
    groups = []
    current_query = ""
    run_len = 0

    with open(data_path, 'r', encoding="utf-8") as reader:
        for raw in reader:
            fields = raw.rstrip().split("\t")
            assert len(fields) == 3
            label, query, candidate = int(fields[0]), fields[1], fields[2]

            # Close the previous run whenever the query changes.
            if query != current_query and run_len > 0:
                groups.append(run_len)
                run_len = 0

            current_query = query
            run_len += 1
            samples.append((label, query, candidate))

    groups.append(run_len)
    assert sum(groups) == len(samples)
    print("load {}-{} pointwise samples from {} over!".format(len(samples), len(groups), data_path))
    return samples, groups


def load_gan_raw_data(data_path):
    """Read ``label<TAB>query<TAB>candidate`` rows and group them by query.

    Args:
        data_path: path to a UTF-8, tab-separated file with exactly three
            columns per line.

    Returns:
        dict mapping each query to ``{'pos': [...], 'neg': [...]}``, where
        candidates with label > 0 go to ``'pos'`` and the rest to ``'neg'``.
    """
    gan_data = dict()
    count = 0
    with open(data_path, 'r', encoding='utf-8') as fr:
        for line in fr:
            ss = line.rstrip().split('\t')
            assert len(ss) == 3
            count += 1
            label, query, candidate = int(ss[0]), ss[1], ss[2]
            # setdefault creates the pos/neg buckets on first sight of a query.
            bucket = gan_data.setdefault(query, {"pos": [], "neg": []})
            bucket['pos' if label > 0 else 'neg'].append(candidate)
    print("load {}-{} gan samples from {} over!".format(count, len(gan_data), data_path))
    return gan_data
    pass


def load_pairwise_data(data_path, vocab_path, batch_size, shuffle, sent1_maxlen, sent2_maxlen):
    """Build a DataLoader yielding (query, positive, negative) batches."""
    vocabulary = load_vocab(vocab_path)
    raw_samples, _ = load_pointwise_raw_data(data_path)

    pair_set = PairwiseDataset(raw_samples, vocabulary,
                               sent1_maxlen=sent1_maxlen,
                               sent2_maxlen=sent2_maxlen)

    return DataLoader(dataset=pair_set, batch_size=batch_size, shuffle=shuffle)


def load_pointwise_data(data_path, vocab_path, batch_size, shuffle, sent1_maxlen, sent2_maxlen):
    """Build a PointwiseDataLoader that also carries per-query group sizes."""
    vocabulary = load_vocab(vocab_path)
    raw_samples, group_sizes = load_pointwise_raw_data(data_path)

    point_set = PointwiseDataset(raw_samples, vocabulary,
                                 sent1_maxlen=sent1_maxlen,
                                 sent2_maxlen=sent2_maxlen)

    return PointwiseDataLoader(dataset=point_set,
                               batch_size=batch_size,
                               shuffle=shuffle,
                               groups=group_sizes)


def load_gan_data(data_path, vocab_path, batch_size, shuffle, sent1_maxlen, sent2_maxlen, pool_size=200):
    """Build a DataLoader over GANDataset items (query / pos / neg pools)."""
    vocabulary = load_vocab(vocab_path)
    grouped_samples = load_gan_raw_data(data_path)

    gan_set = GANDataset(grouped_samples, vocabulary,
                         sent1_maxlen=sent1_maxlen,
                         sent2_maxlen=sent2_maxlen,
                         pool_size=pool_size)

    return DataLoader(dataset=gan_set, batch_size=batch_size, shuffle=shuffle)


if __name__ == '__main__':
    # Smoke test 1: pointwise validation data — print sizes and one batch.
    valid_iter = load_pointwise_data("../data/stc2-repos.valid.txt",
                                     "../data/vocab.stc.txt",
                                     64, False, 30, 30)
    print(len(valid_iter))
    # BUG FIX: ``groups`` is a method on PointwiseDataLoader; without the
    # call this printed a bound-method repr instead of the group sizes.
    print(valid_iter.groups())

    for batch_input in valid_iter:
        print(batch_input)
        break

    # Smoke test 2: pairwise training data — print one batch's tensor shapes.
    data_iter = load_pairwise_data("../data/stc2-repos.train.txt",
                                   "../data/vocab.stc.txt",
                                   64, False, 30, 30)

    for batch_input in data_iter:
        print(batch_input[0].size())
        print(batch_input[1].size())
        print(batch_input[2].size())
        break
