import sentencepiece as spm
import json
from pathlib import Path
from glob import glob
from tqdm import tqdm


def text_process(
    input_dir="./data/Emilia-YODAS/ZH",
    output_file="./text.txt",
):
    """Extract Chinese text from the Emilia-YODAS ZH JSON dataset.

    Scans every ``ZH-B*`` subdirectory of *input_dir* for ``*.json``
    files, pulls a text field out of each record, normalizes the
    whitespace, and writes one cleaned record per line to *output_file*.

    Args:
        input_dir: Root directory containing the ``ZH-B*`` shard
            subdirectories. Defaults to the original hard-coded path.
        output_file: Destination path for the newline-joined corpus.

    Returns:
        bool: True when the output file was written, False on write failure.
    """
    input_dir = Path(input_dir)
    output_file = Path(output_file)

    # Collect every JSON file under the ZH-B* shard directories.
    json_paths = [
        json_path
        for subdir in input_dir.glob("ZH-B*")
        for json_path in subdir.glob("*.json")
    ]

    # Buffer cleaned lines so the output is written in one batched pass.
    text_buffer = []

    # Per-file error handling: one bad file is logged and skipped, it
    # does not abort the whole run.
    for file_path in tqdm(json_paths):
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            # Guard the record shape explicitly so malformed records are
            # reported as parse failures rather than surfacing as an
            # opaque AttributeError in the generic handler below.
            if not isinstance(data, dict):
                raise ValueError("top-level JSON value is not an object")

            # Multi-level text extraction: top-level "text" wins, then
            # content.text, then metadata.text. Nested containers may be
            # missing or non-dict in malformed records, hence the guards.
            content = data.get("content")
            metadata = data.get("metadata")
            text = (
                data.get("text", "")
                or (content.get("text", "") if isinstance(content, dict) else "")
                or (metadata.get("text", "") if isinstance(metadata, dict) else "")
            )

            # Normalize: strip edges, drop CRs, fold newlines to spaces
            # so each record occupies exactly one output line.
            clean_text = text.strip().replace("\r", "").replace("\n", " ")
            if clean_text:
                text_buffer.append(clean_text)

        # json.JSONDecodeError is a ValueError subclass, so this catches
        # both decode errors and the shape check above. (The original
        # "except KeyError" was dead: .get() never raises KeyError.)
        except ValueError as e:
            print(f"解析失败: {file_path} - {str(e)}")
        except Exception as e:
            print(f"未知错误: {file_path} - {str(e)}")

    # Single batched write. NOTE(review): this is NOT atomic — a crash
    # mid-write leaves a partial file; write to a temp file and rename
    # if atomicity is required.
    try:
        with open(output_file, "w", encoding="utf-8", newline="\n") as f:
            f.write("\n".join(text_buffer))
        print(f"成功生成: {output_file} ({len(text_buffer)}条记录)")
        return True
    except IOError as e:
        print(f"文件写入失败: {str(e)}")
        return False


if __name__ == "__main__":
    # Step 1: build the training corpus (uncomment to regenerate ./text.txt).
    # text_process()

    # Step 2: SentencePiece training configuration (uncomment the Train
    # call below to retrain the model from text.txt).
    model_name = "./tokenizer/chinese"
    model_type = "unigram"
    vocab_size = 5000

    # spm.SentencePieceTrainer.Train(
    #     input="text.txt",
    #     vocab_size=vocab_size,
    #     model_type=model_type,
    #     model_prefix=model_name,
    #     character_coverage=0.9995,
    #     pad_id=0,
    #     unk_id=1,
    #     bos_id=2,
    #     eos_id=3,
    # )

    # Step 3: load the trained tokenizer and sanity-check it on a
    # sample sentence.
    model_file = "./tokenizer/chinese.model"
    tokenizer = spm.SentencePieceProcessor(model_file=model_file)
    print("vocab_size from tokenizer:", tokenizer.vocab_size())
    print(tokenizer.encode_as_ids('这是一句测试语句'))
    print(tokenizer.pad_id())


