import pickle
import torch
from sympy import false
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from Gpt2_Chatbot.data_preprocess import MyDataset
from Gpt2_chatbot.parameter_config import ParameterConfig

from src.ptune_multi_chat_glm.utils import config_ini, get_project_root, get_logger


def collate_fn(batch_data):
    """
    Custom collate_fn that pads a batch of variable-length samples.

    :param batch_data: one batch; each element is a list of token ids
        (the input_ids of one dialogue sample)
    :return: tuple ``(input_ids, labels)``, both LongTensor of shape
        (batch, max_len) — NOT a dict with token_type_ids/attention_mask
    """
    batch_data = [torch.tensor(data) for data in batch_data]
    # Pad input_ids with 0 (assumed to be the tokenizer's pad id — TODO confirm).
    input_ids = pad_sequence(batch_data, batch_first=True, padding_value=0)
    # Pad labels with -100 so CrossEntropyLoss (default ignore_index=-100)
    # skips padded positions. The previous value, 100, is a real vocabulary
    # token id, which would make the model train on padding.
    labels = pad_sequence(batch_data, batch_first=True, padding_value=-100)
    return input_ids, labels


def get_dataloader(train_path: str, valid_path: str):
    """
    Build the training and validation DataLoaders from pickled datasets.

    :param train_path: path to the pickled training data
    :param valid_path: path to the pickled validation data
    :return: tuple ``(train_dataloader, valid_dataloader)``
    """
    def _load_dataset(path):
        # NOTE(review): pickle.load is unsafe on untrusted files — these are
        # assumed to be locally produced preprocessing artifacts.
        with open(path, 'rb') as fh:
            return MyDataset(pickle.load(fh))

    train_dataset = _load_dataset(train_path)
    valid_dataset = _load_dataset(valid_path)
    batch_size = config_ini.batch_size
    # Only the training loader shuffles; both drop the last incomplete batch.
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
                                  shuffle=True, collate_fn=collate_fn,
                                  drop_last=True)
    valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size,
                                  shuffle=False, collate_fn=collate_fn,
                                  drop_last=True)
    return train_dataloader, valid_dataloader
