# encoding: utf-8
# @Time:    :2025/2/5 21:57

import os
import json
from transformers import AutoTokenizer

# Special-token literals for the tokenizer's sequence boundaries.
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by a later processing step (e.g. SFT formatting); confirm before removing.
bos_token = "<s>"
eos_token = "</s>"


def process_oneline(data_string: str) -> str:
    """Extract the plain text of one JSON-encoded encyclopedia entry.

    The line is expected to be a JSON object with an optional "summary"
    string and an optional "sections" list of objects carrying "content"
    strings. Summary and section contents are concatenated in order with
    all newlines stripped.

    Best-effort: a line that is not valid JSON, or whose structure does
    not match expectations, yields whatever text was accumulated before
    the problem (possibly the empty string) rather than raising.

    :param data_string: one raw line of the source JSONL file.
    :return: the concatenated, newline-free text (may be "").
    """
    parts: list[str] = []
    try:
        data = json.loads(data_string)
        summary = data.get("summary")
        if summary:
            # Strip embedded newlines so each record stays on one output line.
            parts.append(summary.replace("\n", ""))
        for sec in data.get("sections", []):
            content = sec.get("content")
            if content:
                parts.append(content.replace("\n", ""))
    except (json.JSONDecodeError, AttributeError, TypeError):
        # Malformed line or unexpected structure: keep what we have.
        # (Narrowed from a blanket `except Exception` so real bugs surface.)
        return "".join(parts)
    return "".join(parts)


def pretrain_process(
    src_path: str = "D:/data/pretrain_datas/563w_baidubaike.json",
    out_path: str = "./datas/pretrain_data.txt",
    max_lines: int = 10000,
) -> int:
    """Convert the raw baidubaike JSONL dump into a plain-text pretrain file.

    Reads up to ``max_lines`` JSON lines from ``src_path``, flattens each
    with :func:`process_oneline`, and writes one text record per line to
    ``out_path``.

    :param src_path: source JSONL file (one JSON object per line).
    :param out_path: destination plain-text file (overwritten).
    :param max_lines: maximum number of records to emit.
    :return: the number of records written.
    """
    # Ensure the output directory exists; the original hard-coded "./datas"
    # and crashed with FileNotFoundError when it was missing.
    out_dir = os.path.dirname(out_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    written = 0
    with open(src_path, "r", encoding="utf-8") as fin, \
            open(out_path, "w", newline="", encoding="utf-8") as fout:
        for line in fin:
            fout.write(process_oneline(line))
            fout.write("\n")
            written += 1
            # `>=` fixes the original off-by-one (`> max_lines` after the
            # increment emitted max_lines + 1 records).
            if written >= max_lines:
                break
    return written


if __name__ == "__main__":
    # Load the project-local tokenizer just to report its vocabulary size;
    # it is not otherwise used by the pretrain path below.
    tokenizer = AutoTokenizer.from_pretrained("./model/minimind_tokenizer")
    print("tokenizer 词表大小：", len(tokenizer))
    ################
    # 1: pretrain
    # 2: sft
    # 3: RL
    ################
    # Selects which pipeline stage to run; only stage 1 (pretrain) is
    # implemented in this file — presumably 2/3 live elsewhere (verify).
    process_type = 1
    if process_type == 1:
        pretrain_process()
