# -*- coding: utf-8 -*-

import numpy as np
from collections import Counter
from torch.utils.data import Dataset
import os

PRESERVE_WORDS = ["<SOS>", "<EOS>", "<PAD>", "<UNK>"]


# 类的参数是所有token的列表，获得一些token2id的方法
# Maps tokens to integer ids and back, reserving the leading slots for
# the special tokens in preserve_words.
class Vocab:
    def __init__(self, token_list,
                 preserve_words=PRESERVE_WORDS,
                 max_vocab_size=20000):
        """Build the token<->index lookup tables.

        :param token_list: corpus tokens, ordered by descending frequency
        :param preserve_words: special tokens placed first; by convention
                               <SOS> 0, <EOS> 1, <PAD> 2, <UNK> 3
        :param max_vocab_size: hard cap on the total vocabulary size
                               (preserved words included)
        """
        self.token_list = list(preserve_words) + token_list
        # Truncation keeps the preserved words because they sit at the front.
        self.token_list = self.token_list[:max_vocab_size]
        # e.g. {"<SOS>": 0, ..., "I": 4, "like": 5}
        self.token2index_dict = {token: i for i, token in enumerate(self.token_list)}
        # Derive the <UNK> id once instead of hard-coding 3 at every lookup,
        # so a different preserve_words ordering still maps OOV correctly.
        self.unk_index = self.token2index_dict.get("<UNK>", 3)

    # single token -> index (OOV falls back to <UNK>)
    def token2index(self, token):
        return self.token2index_dict.get(token, self.unk_index)

    # single index -> token
    def index2token(self, idx):
        return self.token_list[idx]

    # list of tokens -> list of indexes
    def tokens2indexs(self, tokens):
        return [self.token2index_dict.get(token, self.unk_index) for token in tokens]

    # list of indexes -> list of tokens
    def indexs2tokens(self, idxs):
        return [self.token_list[idx] for idx in idxs]

    def get_vocab_size(self):
        return len(self.token_list)


# 获得一个包含所有source_token的列表，包含所有target_token的列表，souce_vocab类、target_vocab类
# Reads the parallel corpora and builds the source/target token lists plus
# a Vocab for each side.
class DataProcessor:
    def __init__(self, src_filename, tgt_filename, src_max_vocab_size=20000, tgt_max_vocab_size=20000):
        """
        :param src_filename: source-language corpus, one sentence per line
        :param tgt_filename: target-language corpus, line-aligned with src
        :param src_max_vocab_size: vocabulary cap for the source side
        :param tgt_max_vocab_size: vocabulary cap for the target side
        """
        self.src_filename = src_filename
        self.tgt_filename = tgt_filename

        self.src_tokens_list, self.src_vocab = self.load_corpus_and_vocab(src_filename, src_max_vocab_size)
        self.tgt_tokens_list, self.tgt_vocab = self.load_corpus_and_vocab(tgt_filename, tgt_max_vocab_size)

    @staticmethod
    def load_corpus_and_vocab(corpus_filename, max_vocab_size):
        """Tokenize a whitespace-separated corpus file and build its Vocab.

        :param corpus_filename: e.g. data/en.txt or data/zh.txt
        :param max_vocab_size: total vocabulary cap (preserved words included)
        :return: (tokens_list, vocab); tokens_list is a list of token lists
        """
        with open(corpus_filename, encoding='utf-8') as f:
            content = f.read()  # whole file as one string
        # splitlines() (unlike split("\n")) yields no spurious empty entry
        # when the file ends with a newline, which would otherwise create an
        # empty training sample.
        lines = content.splitlines()

        def _parse_line(line):
            # strip removes surrounding \n / \t / spaces before splitting
            return line.strip().split()

        tokens_list = [_parse_line(line) for line in lines]  # [["I", "like", "China"], ["I", "hate", "dog"]]
        counter = Counter()
        for tokens in tokens_list:
            counter.update(tokens)
        # Reserve room for the preserved special tokens, so Vocab's own
        # truncation to max_vocab_size does not silently drop the least
        # frequent tokens we just selected.
        token_freq_list = counter.most_common(max_vocab_size - len(PRESERVE_WORDS))
        vocab_tokens = [token for token, _freq in token_freq_list]
        vocab = Vocab(vocab_tokens, max_vocab_size=max_vocab_size)

        return tokens_list, vocab

    def get_all_corpus_and_vocab(self):
        # Returned src-first to mirror the src ==> tgt translation direction.
        return self.src_tokens_list, self.src_vocab, self.tgt_tokens_list, self.tgt_vocab


class DataModulator:
    """Turns a tokenized sentence into a fixed-length index array.

    Pipeline: 1. token -> index  2. append <EOS>  3. crop  4. pad
    """

    # Special-token ids, matching the Vocab convention:
    # <SOS> 0, <EOS> 1, <PAD> 2, <UNK> 3.
    EOS_ID = 1
    PAD_ID = 2

    def __init__(self, sent_length):
        """
        :param sent_length: fixed output length for every modulated sentence
        """
        self.sent_length = sent_length

    def all_items_modulate(self, tokens_list, vocab):
        """Modulate every sentence of a corpus.

        :param tokens_list: e.g., [["I", "like", "China"], ["I", "hate", "dog"], ...]
        :param vocab: an instance of Vocab
        :return: list of (index array [sent_length], efficient_length) pairs,
                 e.g. ([i1, i2, i3, 1, 2, 2, 2, 2], 4)
        """
        return [self.single_item_modulate(tokens, vocab) for tokens in tokens_list]

    def single_item_modulate(self, tokens, vocab):
        """Convert one sentence to a fixed-length index array.

        Steps:
        1. string tokens -> indexes (no <SOS> is prepended here)
        2. append <EOS> (1)
        3. crop sentences longer than sent_length
        4. pad shorter sentences with <PAD> (2)

        NOTE(review): prepending <SOS> to the decoder input is intentionally
        NOT done here — it belongs to the model, e.g. a shift in
        Model.forward such as:
            batch_sos = torch.ones([t.size(0), 1], ...) * sos_id
            torch.cat([batch_sos, t[:, :-1]], -1)
        so decoder_input = <sos> w1 w2 w3 while decoder_output = w1 w2 w3 <eos>.

        :param tokens: a list of strings, e.g., ["I", "like", "China"]
        :param vocab: an instance of Vocab
        :return: (np.ndarray of int64 [sent_length], efficient_length) where
                 efficient_length counts real tokens + <EOS>, capped at
                 sent_length; e.g. ([10, 11, 101, 1, 2, 2, ...], 4)
        """
        # 1. token -> index (<UNK> for OOV)
        index_s = vocab.tokens2indexs(tokens)
        # 2. append <EOS>
        index_s = index_s + [self.EOS_ID]
        efficient_length = len(index_s)
        # 3. crop: i1 i2 i3 <eos> ...
        index_s = index_s[: self.sent_length]
        efficient_length = min(efficient_length, self.sent_length)
        # 4. pad with <PAD> up to sent_length
        if len(index_s) < self.sent_length:
            index_s = index_s + [self.PAD_ID] * (self.sent_length - len(index_s))

        # np.long was removed in NumPy 1.24; np.int64 is the portable dtype.
        index_s = np.asarray(index_s, dtype=np.int64)

        return index_s, efficient_length


class DataSpliter:
    """Splits parallel src/tgt sentence lists into train and test partitions."""

    def __init__(self, src_token_list, tgt_token_list, test_ratio):
        self.src_token_list = src_token_list
        self.tgt_token_list = tgt_token_list
        self.test_ratio = test_ratio

    def load_train_test_data_list(self):
        # The first test_ratio fraction becomes the test set, the rest trains;
        # both sides are cut at the same point to keep the pairs aligned.
        cut = int(len(self.src_token_list) * self.test_ratio)
        test_src_list, train_src_list = self.src_token_list[:cut], self.src_token_list[cut:]
        test_tgt_list, train_tgt_list = self.tgt_token_list[:cut], self.tgt_token_list[cut:]
        return train_src_list, train_tgt_list, test_src_list, test_tgt_list


class TranslateDataset(Dataset):
    """Paired src/tgt dataset of modulated (index array, length) samples."""

    def __init__(self,
                 src_tokens_list, src_vocab, src_sent_length,
                 tgt_tokens_list, tgt_vocab, tgt_sent_length):
        super(TranslateDataset, self).__init__()
        # Note: *_sent_length is the per-sentence token budget,
        # not the number of sentences.
        self.src_all_data = DataModulator(src_sent_length).all_items_modulate(
            src_tokens_list, src_vocab)
        self.tgt_all_data = DataModulator(tgt_sent_length).all_items_modulate(
            tgt_tokens_list, tgt_vocab)

        # Parallel corpora must stay aligned one-to-one.
        assert len(self.src_all_data) == len(self.tgt_all_data)
        self.sample_num = len(self.tgt_all_data)

    def __len__(self):
        return self.sample_num

    def __getitem__(self, item):
        src_ids, src_len = self.src_all_data[item]
        tgt_ids, tgt_len = self.tgt_all_data[item]
        # src_ids / tgt_ids: np.ndarray [L]; src_len / tgt_len: scalars
        return src_ids, src_len, tgt_ids, tgt_len


if __name__ == '__main__':
    # Sanity check: build a zh ==> en dataset and print a few batch shapes.
    processor = DataProcessor("data/zh.txt", "data/en.txt")
    (src_tokens_list, src_vocab,
     tgt_tokens_list, tgt_vocab) = processor.get_all_corpus_and_vocab()

    spliter = DataSpliter(src_tokens_list, tgt_tokens_list, 0.2)
    (train_src_list, train_tgt_list,
     test_src_list, test_tgt_list) = spliter.load_train_test_data_list()

    translate_dataset = TranslateDataset(test_src_list, src_vocab, 100,
                                         test_tgt_list, tgt_vocab, 100)
    from torch.utils.data import DataLoader

    translate_dataloader = DataLoader(translate_dataset, batch_size=32)

    for batch_src, batch_src_len, batch_tgt, batch_tgt_len in translate_dataloader:
        print(batch_src.shape, end=" - ")
        print(batch_src_len.shape, end=" - ")
        print(batch_tgt.shape, end=" - ")
        print(batch_tgt_len.shape)
