
from transformers import BertTokenizerFast
import pickle
from tqdm import tqdm
def data_preprocess(input_txt_path, output_txt_path,
                    vocab_path='D:\\Project\\GPT2\\vocab\\vocab.txt'):
    """Tokenize a QA text corpus into id sequences and pickle them.

    The input file is expected to contain QA dialogues separated by blank
    lines; each line within a dialogue is one utterance. Every dialogue is
    encoded as: [CLS] utt1 [SEP] utt2 [SEP] ...

    :param input_txt_path: path of the raw text file
    :param output_txt_path: path of the output pickle file holding the
        list of id sequences
    :param vocab_path: path of the BERT vocab file used to build the
        tokenizer (defaults to the project-local vocab)
    :return: None
    """
    # Could also load from the pretrained model directly:
    # tokenizer = BertTokenizerFast.from_pretrained('bert-base-chinese')
    tokenizer = BertTokenizerFast(vocab_file=vocab_path,
                                  sep_token='[SEP]',
                                  cls_token='[CLS]',
                                  pad_token='[PAD]')

    cls_id = tokenizer.cls_token_id  # sequence-start marker
    sep_id = tokenizer.sep_token_id  # utterance-end marker

    # Load the raw corpus in one read; dialogues are blank-line separated.
    with open(input_txt_path, 'r', encoding='utf-8') as f:
        data = f.read()

    # Handle both Windows (\r\n) and Unix (\n) line endings.
    if "\r\n" in data:
        qa_list = data.split("\r\n\r\n")
    else:
        qa_list = data.split("\n\n")

    qa_ids_list = []

    for qa_data in tqdm(qa_list):
        if "\r\n" in qa_data:
            seq = qa_data.split("\r\n")
        else:
            seq = qa_data.split("\n")

        input_ids = [cls_id]
        for qa in seq:
            # Special tokens are added manually, so disable them here.
            token_ids = tokenizer.encode(qa, add_special_tokens=False)
            input_ids += token_ids
            input_ids.append(sep_id)

        qa_ids_list.append(input_ids)

    # Dump once after the loop — the original rewrote the whole file on
    # every iteration, which is O(n^2) in total I/O.
    with open(output_txt_path, "wb") as f:
        pickle.dump(qa_ids_list, f)

if __name__ == '__main__':
    # Convert raw train/valid corpora to vocab ids and store them as pickles.
    # Raw strings avoid the invalid '\d' escape the original paths contained.
    data_preprocess(r'D:\Project\GPT2\data\medical_train.txt',
                    r'D:\Project\GPT2\data\medical_train.pkl')
    data_preprocess(r'D:\Project\GPT2\data\medical_valid.txt',
                    r'D:\Project\GPT2\data\medical_valid.pkl')

