# Add the following code to ushertonizers.py
import json
import os
import random

from base_model.nlp.base_model.first_model_mla import UsherConfig
from base_model.nlp.base_model.first_model_mla import usherTokenizers
from base_model.nlp.base_model.first_model_mla import Transformer
from base_model.nlp.base_model.first_model_mla import modelInit
from base_model.nlp.base_model.first_model_mla import forward

# Synthetic training pairs: a random digit string ("input") and its
# per-digit Chinese-numeral transliteration ("output").
orign_data = []

# Map each ASCII digit to the corresponding Chinese numeral.
trans_map = {
    "0": "零",
    "1": "一",
    "2": "二",
    "3": "三",
    "4": "四",
    "5": "五",
    "6": "六",
    "7": "七",
    "8": "八",
    "9": "九"
}

# Number of (input, output) samples to generate.
num_count = 50000

for _ in range(num_count):
    # Each sample is a 1-9 digit random number string; the output is the
    # digit-by-digit transliteration (leading zeros are allowed on purpose,
    # since the task is per-character mapping, not number reading).
    digits = [str(random.randint(0, 9)) for _ in range(random.randint(1, 9))]
    orign_data.append({
        "input": "".join(digits),
        "output": "".join(trans_map[d] for d in digits),
    })
config = UsherConfig()

# Path of the generated JSONL training file (one JSON object per line).
train_file = config.path + "/train.jsonl"

# Create the output directory if needed; exist_ok avoids the
# check-then-create race of os.path.exists() followed by os.makedirs().
os.makedirs(config.path, exist_ok=True)

with open(train_file, "w", encoding="utf-8") as f:
    for item in orign_data:
        # json.dumps handles quoting/escaping correctly; the previous
        # hand-built f-string would produce invalid JSON if a value ever
        # contained '"' or '\'. ensure_ascii=False keeps the Chinese
        # numerals human-readable instead of \uXXXX escapes.
        f.write(json.dumps(item, ensure_ascii=False) + "\n")

# NOTE(review): presumably prepares tokenizer state and model weights —
# both are defined in first_model_mla; confirm against that module.
usherTokenizers.init()
modelInit()
# Optional inference smoke test, left disabled: forward(r"123")