import os.path
from collections import Counter

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import vocab
from tqdm import tqdm

from utils.log_helper import log_init

# Module-level logger for this data helper (configured by the project's log_init).
logger = log_init("data_helper")

# spaCy-backed tokenizers keyed by language code.
# NOTE(review): loaded eagerly at import time — requires the spaCy models
# de_core_news_sm and en_core_web_sm to be installed; verify in deployment env.
tokenizer_dict = {'de': get_tokenizer('spacy', language='de_core_news_sm'),
                  'en': get_tokenizer('spacy', language='en_core_web_sm')}


def build_vocab(tokenizer, filepath, min_freq, specials=None):
    """Build a torchtext vocabulary from a plain-text corpus.

    :param tokenizer: callable mapping one line of text to a list of tokens
    :param filepath: path to the corpus file, one sentence per line
    :param min_freq: minimum token frequency required for inclusion
    :param specials: special symbols prepended to the vocab; defaults to
                     ['<unk>', '<pad>', '<bos>', '<eos>']
    :return: a torchtext vocab whose default index is the one of '<unk>'
    """
    if specials is None:
        specials = ['<unk>', '<pad>', '<bos>', '<eos>']
    counter = Counter()

    with open(filepath, encoding='utf8') as f:
        # Stream the file instead of materializing it with list(f):
        # the original held the whole corpus in memory just so tqdm had a
        # total; iterating the handle keeps memory flat for large corpora.
        for line in tqdm(f, ncols=100):
            counter.update(tokenizer(line))
    vocab_ = vocab(counter, specials=specials, min_freq=min_freq)
    # Out-of-vocabulary tokens map to '<unk>' instead of raising KeyError.
    vocab_.set_default_index(vocab_['<unk>'])
    return vocab_


class LoadEnglishGermanDataset:
    """Load a line-aligned German/English parallel corpus and expose it as
    padded, batched DataLoaders for seq2seq (transformer) training.

    :param train_file_paths: [de_filepath, en_filepath] used to build the vocabs
    :param tokenizer: dict with 'de' and 'en' tokenizer callables
    :param batch_size: batch size for every DataLoader this object produces
    :param min_freq: minimum token frequency for vocabulary inclusion
    """

    def __init__(self, train_file_paths=None, tokenizer=None, batch_size=2, min_freq=1):
        self.tokenizer = tokenizer
        logger.debug('begin build de_vocab and en_vocab')
        self.de_vocab = build_vocab(self.tokenizer['de'], filepath=train_file_paths[0], min_freq=min_freq)
        self.en_vocab = build_vocab(self.tokenizer['en'], filepath=train_file_paths[1], min_freq=min_freq)
        logger.debug(f'de_vocab size = {len(self.de_vocab)}')
        logger.debug(f'en_vocab size = {len(self.en_vocab)}')
        self.specials = ['<unk>', '<pad>', '<bos>', '<eos>']
        # The special tokens occupy the same indices in both vocabs because
        # build_vocab prepends the same `specials` list to each, so reading
        # them from de_vocab is safe for both sides.
        self.PAD_IDX = self.de_vocab['<pad>']
        self.BOS_IDX = self.de_vocab['<bos>']
        self.EOS_IDX = self.de_vocab['<eos>']
        self.batch_size = batch_size

    def data_process(self, filepaths):
        """Convert every aligned sentence pair into a pair of index tensors.

        :param filepaths: [de_filepath, en_filepath], aligned line by line
        :return: list of (de_tensor, en_tensor) pairs, dtype torch.long
        """
        data = []
        _, de_filename = os.path.split(filepaths[0])
        _, en_filename = os.path.split(filepaths[1])
        logger.debug(f'begin processing {de_filename}/{en_filename}')
        # Use context managers so both handles are closed even on error —
        # the original called open() without ever closing the files.
        with open(filepaths[0], encoding='utf8') as de_file, \
                open(filepaths[1], encoding='utf8') as en_file:
            for raw_de, raw_en in tqdm(list(zip(de_file, en_file)), ncols=100):
                de_tensor = torch.tensor(
                    [self.de_vocab[token] for token in self.tokenizer['de'](raw_de.rstrip('\n'))],
                    dtype=torch.long)
                en_tensor = torch.tensor(
                    [self.en_vocab[token] for token in self.tokenizer['en'](raw_en.rstrip('\n'))],
                    dtype=torch.long)
                data.append((de_tensor, en_tensor))
        logger.debug(f'{de_filename}/{en_filename} dataset processing finished')
        logger.debug(f'{de_filename}/{en_filename} dataset size = {len(data)}')
        return data

    def generate_batch(self, data_batch):
        """Collate function: pad a list of (de, en) tensor pairs into batches.

        :param data_batch: list of (de_tensor, en_tensor) pairs
        :return: (de_batch, en_batch), each of shape [seq_len, batch_size]
        """
        de_batch, en_batch = [], []

        for de_item, en_item in data_batch:
            # The encoder input sequence needs no start/end markers.
            de_batch.append(de_item)
            # Wrap each decoder-side index sequence with <bos> ... <eos>.
            en = torch.cat([torch.tensor([self.BOS_IDX]), en_item, torch.tensor([self.EOS_IDX])], dim=0)
            en_batch.append(en)

        # Pad every sequence up to the length of the longest one in the batch.
        de_batch = pad_sequence(de_batch, padding_value=self.PAD_IDX)
        en_batch = pad_sequence(en_batch, padding_value=self.PAD_IDX)

        return de_batch, en_batch

    def generate_square_subsequent_mask(self, sz, device):
        """Additive causal mask: 0.0 on/below the diagonal, -inf above it.

        :param sz: sequence length
        :param device: torch device on which to allocate the mask
        :return: [sz, sz] float tensor
        """
        mask = (torch.triu(torch.ones((sz, sz), device=device)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def create_mask(self, src, tgt, device='cpu'):
        """Build the attention and padding masks for a transformer step.

        :param src: encoder input, shape [src_len, batch_size]
        :param tgt: decoder input, shape [tgt_len, batch_size]
        :param device: device the masks are created on
        :return: (src_mask, tgt_mask, src_padding_mask, tgt_padding_mask)
        """
        src_seq_len = src.shape[0]
        tgt_seq_len = tgt.shape[0]
        # Decoder attention mask [tgt_len, tgt_len]: hides every position
        # after the current one (causal/subsequent masking).
        tgt_mask = self.generate_square_subsequent_mask(tgt_seq_len, device)
        # The encoder may attend everywhere, so its mask is all False.
        src_mask = torch.zeros((src_seq_len, src_seq_len), device=device).type(torch.bool)

        # Padding masks, [batch_size, seq_len]: True where the token is <pad>,
        # masking the padded tail of each sequence out of attention.
        src_padding_mask = (src == self.PAD_IDX).transpose(0, 1)
        tgt_padding_mask = (tgt == self.PAD_IDX).transpose(0, 1)

        return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask

    def load_train_val_test_data(self, train_file_paths, val_file_paths, test_file_paths):
        """Process the three corpora and wrap each in a shuffled DataLoader
        using :meth:`generate_batch` as the collate function."""
        train_data = self.data_process(train_file_paths)
        val_data = self.data_process(val_file_paths)
        test_data = self.data_process(test_file_paths)

        train_iter = DataLoader(train_data, batch_size=self.batch_size, shuffle=True, collate_fn=self.generate_batch)
        valid_iter = DataLoader(val_data, batch_size=self.batch_size, shuffle=True, collate_fn=self.generate_batch)
        test_iter = DataLoader(test_data, batch_size=self.batch_size, shuffle=True, collate_fn=self.generate_batch)

        return train_iter, valid_iter, test_iter
