import os
import pickle

from tqdm import tqdm
from transformers import CpmTokenizer

from ai import AiConstant
from ai.utils import utils_file
from . import gpt_config


logger = gpt_config.gpt_logger
args = gpt_config.args

# Initialize the tokenizer. CpmTokenizer performs Chinese word segmentation
# via jieba — hence the install note on the line below.
tokenizer = CpmTokenizer(vocab_file=args.vocab_file_path)  # pip install jieba
eod_id = tokenizer.convert_tokens_to_ids("<eod>")  # end-of-document token id
sep_id = tokenizer.sep_token_id  # separator id, placed between title and body

# Read every file in the essay dataset directory and turn each document into
# fixed-size training windows of token ids.
train_list = []

# Sliding-window parameters are constant across files — hoist them out of the loop.
win_size = args.window_size
step = args.window_step

logger.info("start tokenizing data.........")
for file in tqdm(os.listdir(args.data_path)):
    file = os.path.join(args.data_path, file)
    with open(file, "r", encoding="utf8") as reader:
        lines = reader.readlines()
        # NOTE(review): assumes a fixed file layout — line 2 holds the title
        # after a 3-character prefix, and the body starts at line 8. Verify
        # against the dataset format.
        title = lines[1][3:].strip()  # extract the title
        lines = lines[7:]  # extract the body
        # Gather non-blank body lines (newlines kept) into one string.
        # str.join is linear; the original `article += line` loop was
        # potentially quadratic.
        article = "".join(line for line in lines if line.strip() != "")
        title_ids = tokenizer.encode(title, add_special_tokens=False)
        article_ids = tokenizer.encode(article, add_special_tokens=False)
        # One document = title <sep> body <eod>
        token_ids = title_ids + [sep_id] + article_ids + [eod_id]

        # Cut the document into windows of win_size tokens, advancing by
        # `step` each time. A window is emitted only while more than 50
        # tokens remain beyond its end, so the document tail (and any
        # document shorter than win_size + 50 tokens) produces no samples.
        start_index = 0
        end_index = win_size
        while end_index + 50 < len(token_ids):
            train_list.append(token_ids[start_index:end_index])
            start_index += step
            end_index += step

# Serialize the training samples to disk with pickle.
# makedir_for_file presumably creates the parent directory of the target
# path if it is missing — verify against the utils_file helper.
utils_file.makedir_for_file(args.save_train_list_file_path)
with open(args.save_train_list_file_path, "wb") as f:
    pickle.dump(train_list, f)
