# encoding: utf-8
# @Time:    :2025/2/6 21:46
# description: data process for next token predict

import json
import traceback
from loguru import logger

from typing import List

from transformers import AutoTokenizer

# Tokenizer is loaded once at import time and shared by process_one_line.
# NOTE(review): hard-coded local Windows path — presumably the MiniMind
# tokenizer checkout; consider making this configurable (env var / CLI arg).
pretrained_path = "E:/codes/llm_about/llm_from_zero/my_minimind/model/minimind_tokenizer"
tokenizer = AutoTokenizer.from_pretrained(pretrained_path)


def process_one_line(data_string: str) -> List[List[int]]:
    """Tokenize one JSON corpus line and slice it into fixed-length windows.

    The line is expected to be a JSON object with an optional "summary"
    string and an optional "sections" list of {"content": str} dicts.
    Newlines are stripped, the pieces are concatenated, tokenized, padded
    up to one full window if short, and split into overlapping windows of
    ``target_length`` token ids (stride 1) for next-token prediction.

    Args:
        data_string: a single JSON-encoded line of the raw corpus.

    Returns:
        A list of token-id windows, each exactly ``target_length`` long.
        On any parse/tokenize error, logs the traceback and returns
        whatever was accumulated so far (normally an empty list).
    """
    target_length = 129
    result: List[List[int]] = []
    try:
        data = json.loads(data_string)

        # Collect text pieces, dropping newlines so each sample tokenizes
        # as one continuous string; join once instead of += concatenation.
        parts: List[str] = []
        summary = data.get("summary")
        if summary:
            parts.append(summary.replace("\n", ""))
        for sec in data.get("sections", []):
            content = sec.get("content")
            if content:
                parts.append(content.replace("\n", ""))
        text = "".join(parts)

        token_ids = tokenizer.encode(text)
        pad_length = target_length - len(token_ids)
        if pad_length > 0:
            # Pad short samples up to exactly one full window.
            token_ids = token_ids + [tokenizer.pad_token_id] * pad_length

        # BUG FIX: the original range stopped at len - target_length, which
        # yields NO window when len == target_length (the padded case — the
        # padding above was dead code) and always drops the final window;
        # the +1 makes the last full window inclusive.
        for i in range(0, len(token_ids) - target_length + 1):
            result.append(token_ids[i:i + target_length])
        return result

    except Exception:
        # Best-effort per line: log the full traceback and return what we
        # have (typically []) so a single bad line doesn't abort the run.
        logger.error(traceback.format_exc())
        return result


def pretrain_process():
    """Stream the raw pretrain corpus and dump fixed-length token windows.

    Reads the source file line by line, converts each line to token-id
    windows via process_one_line, and writes each window as a JSON list
    on its own output line. Processing stops (returning 1) once more
    than ``line_limit`` input lines have been consumed.
    """
    line_limit = 500
    lines_seen = 0
    src_path = "D:/data/pretrain_datas/563w_baidubaike.json"
    dst_path = "./datas/pretrain_data.txt"
    with open(src_path, 'r', encoding="utf-8") as src, \
            open(dst_path, 'w', encoding="utf-8") as dst:
        for raw_line in src:
            for window in process_one_line(raw_line):
                dst.write(json.dumps(window))
                dst.write("\n")
            lines_seen += 1
            if lines_seen > line_limit:
                return 1


# Script entry point: run the full preprocessing pass.
if __name__ == "__main__":
    pretrain_process()
