import pickle   # 对象序列化与反序列化

from transformers import BertTokenizer
from Gpt2_chatbot.parameter_config import ParameterConfig

from src.ptune_multi_chat_glm.utils import config_ini, get_project_root, get_logger

logger = get_logger(__name__)

def data_preprocessing(txt_path: str, save_path: str) -> None:
    """
    Preprocess raw dialogue text into token-id sequences and pickle them.

    The input file holds dialogues separated by blank lines; each dialogue
    holds one utterance per line. Every dialogue is encoded as
    ``[CLS] sent1 [SEP] sent2 [SEP] ...`` and the list of all encoded
    dialogues is serialized to ``save_path`` with pickle.

    :param txt_path: path to the raw UTF-8 text file
    :param save_path: path where the pickled list of id-lists is written
    """
    logger.info(f"开始预处理数据，原始文本文件路径：{txt_path}，预处理后数据保存路径：{save_path}")
    # Load the tokenizer from the locally stored pretrained vocab.
    # NOTE(review): `get_project_root` is used as an object with .joinpath,
    # so it is presumably a Path-like constant, not a callable — confirm.
    tokenizer = BertTokenizer.from_pretrained(get_project_root.joinpath(config_ini.vocab_path))
    # Special token ids used to frame each dialogue.
    sep_id = tokenizer.sep_token_id
    cls_id = tokenizer.cls_token_id
    # Read the whole file and split into dialogue blocks on blank lines.
    with open(txt_path, 'r', encoding='utf-8') as f:
        data = f.read()
    data_list = data.split("\n\n")

    # Encode each dialogue: [CLS] followed by each utterance + [SEP].
    processed_data = []
    for dialog in data_list:
        # Skip empty blocks (e.g. a trailing newline at EOF) which would
        # otherwise yield a degenerate [CLS][SEP] sample.
        if not dialog.strip():
            continue
        input_ids = [cls_id]
        for sentence in dialog.split("\n"):
            # Guard against stray blank lines inside a block, which would
            # inject a bare [SEP] token (tokenizer.encode("") returns []).
            if not sentence:
                continue
            input_ids.extend(tokenizer.encode(sentence, add_special_tokens=False))
            input_ids.append(sep_id)
        processed_data.append(input_ids)
    with open(save_path, 'wb') as f:
        pickle.dump(processed_data, f)
