import json
import os
import random

import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data import Dataset

import config
from utils import chinese_tokenizer_load, english_tokenizer_load, valid_line

DEVICE = config.device


def subsequent_mask(size):
    """Build the decoder's look-ahead mask.

    Returns a (1, size, size) bool tensor that is True at and below the
    main diagonal — position i may attend to positions j <= i — and False
    strictly above it, so future tokens are hidden from the decoder.
    """
    positions = torch.arange(size)
    # True where the key index (columns) does not exceed the query index (rows).
    allowed = positions.unsqueeze(0) <= positions.unsqueeze(1)
    # Leading broadcast dimension expected by the attention code.
    return allowed.unsqueeze(0)


class Batch:
    """Object for holding a batch of data with masks during training.

    Attributes:
        src_text / tgt_text: raw source/target sentences (kept for logging/eval).
        src: source token ids, moved to the configured device.
        src_mask: (batch, 1, src_len) bool mask, True at non-pad positions.
        tgt: decoder input — the target shifted right (last token dropped).
        tgt_y: expected decoder output — the target shifted left (first token dropped).
        tgt_mask: combined padding + look-ahead mask for the decoder input.
        ntokens: count of real (non-pad) tokens in tgt_y, for loss normalization.
    """

    def __init__(self, src_text, tgt_text, src, trg=None, src_pad_id=0, tgt_pad_id=0):
        self.src_text = src_text
        self.tgt_text = tgt_text
        src = src.to(DEVICE)
        self.src = src
        # Mark non-pad source positions; unsqueeze adds the broadcast dim:
        # (batch, seq_len) -> (batch, 1, seq_len).
        self.src_mask = (src != src_pad_id).unsqueeze(-2)
        # Decoder-side tensors are only needed when a target is given (training).
        if trg is not None:
            trg = trg.to(DEVICE)
            # Decoder input: everything except the final token.
            self.tgt = trg[:, :-1]
            # Expected decoder output: everything except the first token.
            self.tgt_y = trg[:, 1:]
            # Hide both padding and future positions from the decoder.
            self.tgt_mask = self.make_std_mask(self.tgt, tgt_pad_id)
            # BUGFIX: count real target tokens against the *target* pad id
            # (previously compared against src_pad_id, which is wrong whenever
            # the two tokenizers use different pad ids).
            self.ntokens = (self.tgt_y != tgt_pad_id).sum()

    @staticmethod
    def make_std_mask(tgt, pad):
        """Create a mask to hide both padding and future words.

        Combines the per-token padding mask with the lower-triangular
        subsequent mask, so position i attends only to non-pad positions <= i.
        """
        tgt_mask = (tgt != pad).unsqueeze(-2)
        # `torch.autograd.Variable` is a deprecated no-op wrapper in modern
        # PyTorch; the subsequent mask is combined directly instead.
        tgt_mask = tgt_mask & subsequent_mask(tgt.size(-1)).type_as(tgt_mask)
        return tgt_mask


class MTDataset(Dataset):
    """Parallel English→Chinese dataset loaded from a JSON file of sentence pairs.

    Sentences are sorted by English length so that batches contain sentences
    of similar length, minimizing padding.
    """

    def __init__(self, data_path, max_input_length=128, max_target_length=128):
        super().__init__()
        self.out_en_sent, self.out_cn_sent = self.get_dataset(data_path, sort=True)
        self.tokenizer_en = english_tokenizer_load()
        self.tokenizer_zh = chinese_tokenizer_load()
        self.src_pad_id = self.tokenizer_en.pad_token_id  # 0
        self.tgt_pad_id = self.tokenizer_zh.pad_token_id  # 0
        self.max_input_length = max_input_length
        self.max_target_length = max_target_length

    @staticmethod
    def len_argsort(seq):
        """Return the indices that would sort `seq` by item length (ascending)."""
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    def get_dataset(self, data_path, sort=False):
        """Load [en, zh] pairs; optionally sort both sides by English sentence length."""
        with open(data_path, "r", encoding="utf8") as f:
            dataset = json.load(f)
        out_en_sent, out_cn_sent = list(zip(*dataset))
        if sort:
            # Sort both languages by the same index order (English length).
            sorted_index = self.len_argsort(out_en_sent)
            out_en_sent = [out_en_sent[i] for i in sorted_index]
            out_cn_sent = [out_cn_sent[i] for i in sorted_index]
        return out_en_sent, out_cn_sent

    def __getitem__(self, idx):
        """Return the [english, chinese] text pair at `idx`."""
        return [self.out_en_sent[idx], self.out_cn_sent[idx]]

    def __len__(self):
        return len(self.out_en_sent)

    def collate_fn(self, batch):
        """Tokenize a batch of [en, zh] pairs and wrap the tensors in a Batch."""
        src_texts, tgt_texts = list(zip(*batch))
        src_tokens = self.tokenizer_en(
            src_texts,
            padding=True,
            truncation=True,
            max_length=self.max_input_length,
            return_tensors="pt",
        ).input_ids
        # BUGFIX: truncate targets with max_target_length — previously this
        # reused max_input_length, leaving max_target_length unused.
        tgt_tokens = self.tokenizer_zh(
            tgt_texts,
            padding=True,
            truncation=True,
            max_length=self.max_target_length,
            return_tensors="pt",
        ).input_ids
        return Batch(
            src_texts,
            tgt_texts,
            src_tokens,
            tgt_tokens,
            self.src_pad_id,
            self.tgt_pad_id,
        )


class NewsCommentary(Dataset):
    """Parallel English→Chinese dataset built from an in-memory list of pairs."""

    def __init__(self, data, max_input_length=64, max_target_length=64):
        super().__init__()
        # `data` is an iterable of (english, chinese) rows.
        self.out_en_sent, self.out_cn_sent = zip(*data)
        self.tokenizer_en = english_tokenizer_load()
        self.tokenizer_zh = chinese_tokenizer_load()
        self.src_pad_id = self.tokenizer_en.pad_token_id  # 0
        self.tgt_pad_id = self.tokenizer_zh.pad_token_id  # 0
        self.max_input_length = max_input_length
        self.max_target_length = max_target_length

    def __getitem__(self, idx):
        """Return the [english, chinese] text pair at `idx`."""
        return [self.out_en_sent[idx], self.out_cn_sent[idx]]

    def __len__(self):
        return len(self.out_en_sent)

    def collate_fn(self, batch):
        """Tokenize a batch of [en, zh] pairs and wrap the tensors in a Batch."""
        src_texts, tgt_texts = list(zip(*batch))
        src_tokens = self.tokenizer_en(
            src_texts,
            padding=True,
            truncation=True,
            max_length=self.max_input_length,
            return_tensors="pt",
        ).input_ids
        # BUGFIX: truncate targets with max_target_length — previously this
        # reused max_input_length, leaving max_target_length unused.
        tgt_tokens = self.tokenizer_zh(
            tgt_texts,
            padding=True,
            truncation=True,
            max_length=self.max_target_length,
            return_tensors="pt",
        ).input_ids
        return Batch(
            src_texts,
            tgt_texts,
            src_tokens,
            tgt_tokens,
            self.src_pad_id,
            self.tgt_pad_id,
        )


class BackTranslatedNews(Dataset):
    """Parallel English→Chinese dataset over back-translated news sentence pairs."""

    def __init__(self, data, max_input_length=64, max_target_length=64):
        super().__init__()
        # `data` is an iterable of (english, chinese) rows.
        self.out_en_sent, self.out_cn_sent = zip(*data)
        self.tokenizer_en = english_tokenizer_load()
        self.tokenizer_zh = chinese_tokenizer_load()
        self.src_pad_id = self.tokenizer_en.pad_token_id  # 0
        self.tgt_pad_id = self.tokenizer_zh.pad_token_id  # 0
        self.max_input_length = max_input_length
        self.max_target_length = max_target_length

    def __getitem__(self, idx):
        """Return the [english, chinese] text pair at `idx`."""
        return [self.out_en_sent[idx], self.out_cn_sent[idx]]

    def __len__(self):
        return len(self.out_en_sent)

    def collate_fn(self, batch):
        """Tokenize a batch of [en, zh] pairs and wrap the tensors in a Batch."""
        src_texts, tgt_texts = list(zip(*batch))
        src_tokens = self.tokenizer_en(
            src_texts,
            padding=True,
            truncation=True,
            max_length=self.max_input_length,
            return_tensors="pt",
        ).input_ids
        # BUGFIX: truncate targets with max_target_length — previously this
        # reused max_input_length, leaving max_target_length unused.
        tgt_tokens = self.tokenizer_zh(
            tgt_texts,
            padding=True,
            truncation=True,
            max_length=self.max_target_length,
            return_tensors="pt",
        ).input_ids
        return Batch(
            src_texts,
            tgt_texts,
            src_tokens,
            tgt_tokens,
            self.src_pad_id,
            self.tgt_pad_id,
        )


def pre_news_commentary(
    pth: str = os.path.join("..", "data"),
    train_num: int = 200_000,
    valid_num: int = 20_000,
    test_num: int = 20_000,
):
    """Prepare News-Commentary train/valid/test splits.

    Loads cached .npy splits when they all exist; otherwise parses the raw
    TSV, shuffles, splits into train/valid/test, and caches the arrays.

    Returns:
        A (train, valid, test) tuple of NewsCommentary datasets.
    """
    data_dir = os.path.join(pth, "new_commentary")
    train_file = os.path.join(data_dir, "train.npy")
    valid_file = os.path.join(data_dir, "valid.npy")
    test_file = os.path.join(data_dir, "test.npy")
    # BUGFIX: check the cache *files*, not just the directory — a partially
    # written cache directory previously crashed np.load.
    if all(os.path.exists(f) for f in (train_file, valid_file, test_file)):
        train_data = np.load(train_file)
        valid_data = np.load(valid_file)
        test_data = np.load(test_file)
    else:
        n = train_num + valid_num + test_num
        file_name = os.path.join(pth, "news-commentary-v15.en-zh.tsv")
        data = []
        with open(file_name, encoding="utf8") as fp:
            for i, line in enumerate(fp):
                # BUGFIX: stop after n lines (`> n` consumed one extra line).
                if i >= n:
                    break
                if valid_line(line):
                    pair = line.split("\t")
                    # Keep only well-formed two-column (en, zh) rows.
                    if len(pair) == 2:
                        data.append(pair)
        random.shuffle(data)
        # np.str_ replaces np.unicode_, which was removed in NumPy 2.0.
        train_data = np.array(data[:train_num], dtype=np.str_)
        valid_data = np.array(data[train_num : train_num + valid_num], dtype=np.str_)
        test_data = np.array(data[train_num + valid_num : n], dtype=np.str_)
        os.makedirs(data_dir, exist_ok=True)
        np.save(train_file, train_data)
        np.save(valid_file, valid_data)
        np.save(test_file, test_data)
    return NewsCommentary(train_data), NewsCommentary(valid_data), NewsCommentary(test_data)


def pre_back_translated_news(
    pth: str = os.path.join("..", "data"),
    train_num: int = 200_000,
    valid_num: int = 20_000,
    test_num: int = 20_000,
):
    """Prepare back-translated-news train/valid/test splits.

    Loads cached .npy splits when they all exist; otherwise reads the parallel
    English/Chinese line files, shuffles, splits, and caches the arrays.

    Returns:
        A (train, valid, test) tuple of BackTranslatedNews datasets.
    """
    data_dir = os.path.join(pth, "back_translated_news")
    train_file = os.path.join(data_dir, "train.npy")
    valid_file = os.path.join(data_dir, "valid.npy")
    test_file = os.path.join(data_dir, "test.npy")
    # BUGFIX: check the cache *files*, not just the directory — a partially
    # written cache directory previously crashed np.load.
    if all(os.path.exists(f) for f in (train_file, valid_file, test_file)):
        train_data = np.load(train_file)
        valid_data = np.load(valid_file)
        test_data = np.load(test_file)
    else:
        n = train_num + valid_num + test_num
        file_name_en = os.path.join(pth, "news.en")
        file_name_zh = os.path.join(pth, "news.translatedto.zh")
        data = []
        # BUGFIX: the two files are line-aligned parallel corpora, but the
        # original filtered each side independently with valid_line, so one
        # rejected line on either side misaligned every later sentence pair.
        # Reading them zipped keeps pairs aligned and drops a pair only when
        # either side is invalid.
        with open(file_name_en, encoding="utf8") as fp_en, open(
            file_name_zh, encoding="utf8"
        ) as fp_zh:
            for i, (line_en, line_zh) in enumerate(zip(fp_en, fp_zh)):
                # BUGFIX: stop after n lines (`> n` consumed one extra line).
                if i >= n:
                    break
                if valid_line(line_en) and valid_line(line_zh):
                    data.append((line_en, line_zh))
        random.shuffle(data)
        # np.str_ replaces np.unicode_, which was removed in NumPy 2.0.
        train_data = np.array(data[:train_num], dtype=np.str_)
        valid_data = np.array(data[train_num : train_num + valid_num], dtype=np.str_)
        test_data = np.array(data[train_num + valid_num : n], dtype=np.str_)
        os.makedirs(data_dir, exist_ok=True)
        np.save(train_file, train_data)
        np.save(valid_file, valid_data)
        np.save(test_file, test_data)
    return BackTranslatedNews(train_data), BackTranslatedNews(valid_data), BackTranslatedNews(test_data)
