import torch
from loguru import logger
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset


class NewsDataset:
    """Helper around the AG_NEWS dataset.

    Two responsibilities:
      1. Split AG_NEWS into a training set and a validation set.
      2. Wrap both splits in ``DataLoader`` objects that collate samples
         into batches of ``config.batch_size`` (see the ``config.py``
         configuration object).
    """

    def __init__(self, tokenizer=None, vocab=None, config=None):
        """
        :param tokenizer: callable turning raw text into a token list
        :param vocab: vocabulary mapping tokens to integer indices
        :param config: project configuration object; supplies
                       ``batch_size`` and ``device``
        """
        self.tokenizer = tokenizer
        self.vocab = vocab
        self.batch_size = config.batch_size
        # Target device ("cpu" or "cuda"), taken from the project config.
        self.device = config.device
        # Raw text -> list of vocabulary indices.
        self.text_pipeline = lambda raw: vocab(tokenizer(raw))
        # AG_NEWS labels come in as 1..4; shift to the 0-based range 0..3.
        self.label_pipeline = lambda raw: int(raw) - 1

    def generate_batch(self, batch):
        """Collate a list of ``(label, text)`` pairs into flat tensors.

        :param batch: iterable of ``(label, text)`` tuples
        :return: ``(labels, texts, offsets)`` — ``labels`` holds the
                 0-based class ids, ``texts`` is the concatenation of all
                 encoded samples into one 1-D index tensor, and
                 ``offsets[i]`` is the start position of sample ``i``
                 inside ``texts`` (EmbeddingBag-style layout).
        """
        labels = torch.tensor(
            [self.label_pipeline(lbl) for lbl, _ in batch], dtype=torch.int64
        )
        encoded = [
            torch.tensor(self.text_pipeline(txt), dtype=torch.int64)
            for _, txt in batch
        ]
        # Offsets are the exclusive prefix sums of the sample lengths.
        sizes = [0] + [sample.size(0) for sample in encoded]
        offsets = torch.tensor(sizes[:-1]).cumsum(dim=0)
        texts = torch.cat(encoded)
        return labels.to(self.device), texts.to(self.device), offsets.to(self.device)

    def train_valid_split(self, data, ratio=0.95):
        """Split ``data`` into train/validation ``DataLoader`` objects.

        :param data: the full dataset (iterable) to split
        :param ratio: fraction of samples assigned to the training set
        :return: ``(train_dataloader, valid_dataloader)``
        """
        # random_split needs len()/indexing, so materialise the iterable first.
        mapped = to_map_style_dataset(data)
        n_train = int(len(mapped) * ratio)
        n_valid = len(mapped) - n_train
        logger.debug('start splitting dataset')
        train_part, valid_part = random_split(mapped, [n_train, n_valid])
        logger.debug('splitting dataset successfully')

        def _loader(subset):
            # Both splits share identical batching/collation settings.
            return DataLoader(subset,
                              shuffle=True,
                              batch_size=self.batch_size,
                              collate_fn=self.generate_batch)

        return _loader(train_part), _loader(valid_part)
