# 导入分词器
import os
from transformers import BertTokenizerFast, BertTokenizer
# 将数据保存为pkl文件，方便下次读取
import pickle
# 读取数据的进度条展示
from tqdm import tqdm


def process(train_txt_path, train_pkl_path):
    """
    Tokenize the raw dialogue corpus and pickle the result.

    Each dialogue is encoded as:
        "[CLS]sequence1[SEP]sequence2[SEP]sequence3[SEP]"

    :param train_txt_path: path to the raw text corpus; dialogues are
        separated by a blank line, sentences within a dialogue by a newline.
    :param train_pkl_path: output path for the pickled list of token-id lists.
    """
    # Build the tokenizer from the project's vocab file, resolved relative to
    # this script so the path works regardless of the current working directory.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    tokenizer = BertTokenizerFast(os.path.join(current_dir, '../vocab/vocab.txt'),
                                  sep_token="[SEP]",
                                  pad_token="[PAD]",
                                  cls_token="[CLS]")

    print(len(tokenizer.get_vocab()))
    sep_id = tokenizer.sep_token_id  # token id of the [SEP] separator
    print(f'sep_id--》{sep_id}')
    cls_id = tokenizer.cls_token_id  # token id of the [CLS] start marker
    print(f'cls_id--》{cls_id}')

    # Read the whole corpus into memory.
    with open(train_txt_path, 'r', encoding='utf-8') as fr:
        data = fr.read()

    # Dialogues are separated by a blank line; handle both Windows (\r\n)
    # and Unix (\n) line endings.
    if "\r\n" in data:
        train_data = data.split("\r\n\r\n")
    else:
        train_data = data.split("\n\n")
    print(f'len(train_data)--->{len(train_data)}')
    print(f'train_data--》{train_data[:3]}')

    dialogue_len = []   # tokenized length of each dialogue (for median/mean stats)
    dialogue_list = []  # token-id list of each dialogue

    for dialogue in tqdm(train_data):
        # Split one dialogue into its individual sentences.
        if "\r\n" in dialogue:
            sequences = dialogue.split("\r\n")
        else:
            sequences = dialogue.split("\n")

        # BUG FIX: this encoding block was previously indented inside the
        # "\n" else-branch above, so dialogues with "\r\n" line endings were
        # split but never tokenized or appended. It now runs for every dialogue.
        input_ids = [cls_id]  # every dialogue starts with [CLS]
        for sequence in sequences:
            # Tokenize each sentence and append its ids, followed by [SEP]
            # to mark the end of that turn.
            input_ids += tokenizer.encode(sequence, add_special_tokens=False)
            input_ids.append(sep_id)
        dialogue_len.append(len(input_ids))
        dialogue_list.append(input_ids)

    print(f'dialogue_len--->{dialogue_len}')
    print(f'dialogue_list--->{dialogue_list[:2]}')

    # Persist the token-id lists so later runs can skip re-tokenizing.
    with open(train_pkl_path, "wb") as f:
        pickle.dump(dialogue_list, f)

if __name__ == '__main__':
    # To preprocess a different corpus (e.g. the medical dialogue sets
    # "../data/medical_train.txt" -> "../data/medical_train.pkl" or
    # "../data/medical_valid.txt" -> "../data/medical_valid.pkl"), just point
    # these two paths at the desired input/output files.

    # Chit-chat training corpus
    train_txt_path = "../data/闲聊语料.txt"
    train_pkl_path = "../data/chat_train.pkl"

    # Resolve both paths relative to this script's directory so the script
    # behaves the same no matter where it is launched from.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    train_txt_path = os.path.join(current_dir, train_txt_path)
    train_pkl_path = os.path.join(current_dir, train_pkl_path)

    process(train_txt_path, train_pkl_path)