"""
@author : linrh
@homepage : https://gitee.com/linrh-DUT
@version: 1.0.0
@when : 2023/5/17
@file: data_loader.py
"""

# Multi30k dataset
# A commonly used machine-translation dataset
from torchtext.datasets import Multi30k
# Utility for building a vocabulary from an iterator over the dataset
from torchtext.vocab import build_vocab_from_iterator
from torch.utils import data
from torch.nn.utils.rnn import pad_sequence
import torch


class DataLoader:
    """
    Data loader for the Multi30k machine-translation dataset.

    Usage:
    1. __init__: pass the language pair and the two tokenizers.
    2. make_dataset: load the train/valid/test splits.
    3. build_vocab: build the source and target vocabularies.
    4. make_iter: obtain batch iterators over the splits.
    """
    # Vocabularies, populated by build_vocab()
    vocab_target = None
    vocab_source = None
    # Special-token indices; order matches _special_symbols below
    UNK_IDX, PAD_IDX, BOS_IDX, EOS_IDX = 0, 1, 2, 3
    _special_symbols = ['<unk>', '<pad>', '<bos>', '<eos>']
    # Device that batches are moved to, set by make_iter()
    device = None

    def __init__(self, pair, tokenize_source, tokenize_target):
        """
        :param pair: language pair tuple passed to Multi30k, e.g. ('de', 'en')
        :param tokenize_source: callable str -> list[str] for the source language
        :param tokenize_target: callable str -> list[str] for the target language
        """
        # Tokenizers
        self.tokenize_target = tokenize_target
        self.tokenize_source = tokenize_source
        # Translation language pair
        self.pair = pair

        print('dataset initializing start')

    def make_dataset(self):
        """Load and return the Multi30k (train, valid, test) splits from the 'data' root."""
        train_data, valid_data, test_data = Multi30k('data', language_pair=self.pair)
        return train_data, valid_data, test_data

    def build_vocab(self, min_freq):
        """
        Build source and target vocabularies from the training split.

        :param min_freq: minimum token frequency for a token to enter the vocabulary
        """
        # FIX: use the same 'data' root as make_dataset so the dataset is not
        # fetched into a second, separate directory (previously the default root
        # was used here).
        train_data = Multi30k('data', split='train', language_pair=self.pair)
        self.vocab_source = build_vocab_from_iterator(self._yield_tokens(train_data, source=True),
                                                      min_freq=min_freq,
                                                      specials=self._special_symbols)
        # Unknown tokens map to <unk>
        self.vocab_source.set_default_index(self.UNK_IDX)
        # Reload the data pipe: the previous one is exhausted after iteration.
        train_data = Multi30k('data', split='train', language_pair=self.pair)
        self.vocab_target = build_vocab_from_iterator(self._yield_tokens(train_data, source=False),
                                                      min_freq=min_freq,
                                                      specials=self._special_symbols)
        self.vocab_target.set_default_index(self.UNK_IDX)

    def make_iter(self, train, validate, test, batch_size, device):
        """
        Build batch iterators over the three dataset splits.

        :param train: training dataset
        :param validate: validation dataset
        :param test: test dataset
        :param batch_size: batch size
        :param device: device that batches are moved to
        :return: (train_iterator, valid_iterator, test_iterator); each yields a
            DataBean with
            rtn_data.src: torch[batch_size, seq_len]
            rtn_data.trg: torch[batch_size, seq_len]
        """
        print('dataset initializing done')
        self.device = device
        # NOTE(review): valid/test iterators are also shuffled; harmless for
        # evaluation metrics but kept as-is to preserve existing behavior.
        train_iterator = data.DataLoader(train, batch_size=batch_size, shuffle=True, collate_fn=self._collate_fn)
        valid_iterator = data.DataLoader(validate, batch_size=batch_size, shuffle=True, collate_fn=self._collate_fn)
        test_iterator = data.DataLoader(test, batch_size=batch_size, shuffle=True, collate_fn=self._collate_fn)
        return train_iterator, valid_iterator, test_iterator

    def get_sentence(self, tokens, vocab):
        """Convert a sequence of token ids back into a space-joined sentence."""
        # Hoist the index-to-string table out of the loop: get_itos() is
        # invariant across tokens.
        itos = vocab.get_itos()
        return ' '.join([itos[token] for token in tokens])

    def _yield_tokens(self, data_iter, source=True):
        """Yield token lists for one side of each (source, target) sentence pair."""
        for src_sample, tgt_sample in data_iter:
            if source:
                yield self.tokenize_source(src_sample.lower())
            else:
                yield self.tokenize_target(tgt_sample.lower())

    def _tensor_transform(self, sentence, tokenize, vocab):
        """Tokenize a sentence, map tokens to ids, and wrap with <bos>/<eos>."""
        token_ids = vocab(tokenize(sentence.rstrip("\n").lower()))
        # Use the named special-token constants instead of the magic numbers
        # 2 and 3 that were hard-coded here before (same values, self-documenting
        # and consistent with the class constants).
        return torch.cat((torch.tensor([self.BOS_IDX]),
                          torch.tensor(token_ids),
                          torch.tensor([self.EOS_IDX])), dim=0)

    def _collate_fn(self, batch):
        """Collate (source, target) string pairs into padded id tensors."""
        srcs = []
        tgts = []
        for src_sample, tgt_sample in batch:
            # Convert each sentence to a 1-D id tensor
            srcs.append(self._tensor_transform(src_sample, self.tokenize_source, self.vocab_source))
            tgts.append(self._tensor_transform(tgt_sample, self.tokenize_target, self.vocab_target))
        # Pad so every sentence in the batch has equal length; batch_first=True
        # produces [batch_size, seq_len] directly (equivalent to the previous .T).
        src_batch = pad_sequence(srcs, padding_value=self.PAD_IDX, batch_first=True).to(self.device)
        tgt_batch = pad_sequence(tgts, padding_value=self.PAD_IDX, batch_first=True).to(self.device)
        return DataBean(src_batch, tgt_batch)


class DataBean:
    """Container pairing a source batch with its target batch.

    Both tensors are cast to int64 so they are directly usable as indices
    (e.g. into embedding tables).
    """

    def __init__(self, src, trg):
        self.src = src.to(torch.long)
        self.trg = trg.to(torch.long)