# scripts/encode_tokens.py
import sentencepiece as spm
import numpy as np
import os
from tqdm import tqdm

# ========================
# Configuration: input/output locations.
# All paths are relative to the scripts/ directory.
# ========================
TOKENIZER_PATH = "../tokenizer/pretrain_corpus_sp.model"   # trained SentencePiece model
DATA_DIR = "../data"
INPUT_TEXT = os.path.join(DATA_DIR, "pretrain_corpus.txt")          # raw corpus, one document per line
OUTPUT = os.path.join(DATA_DIR, "pretrain_corpus_tokens_IDs")       # space-separated token-ID stream

# ========================
# Fail fast if either required input is missing, with a message
# naming the specific file. Checks run in a fixed order: corpus
# first, then tokenizer model.
# ========================
_required = (
    (INPUT_TEXT, f"未找到文本文件: {INPUT_TEXT}"),
    (TOKENIZER_PATH, f"未找到分词器模型: {TOKENIZER_PATH}"),
)
for _path, _msg in _required:
    if not os.path.exists(_path):
        raise FileNotFoundError(_msg)

# ========================
# Load the trained SentencePiece model; `sp` is used below to
# map text lines to token-ID sequences.
# ========================
print("🔁 加载分词器...")
sp = spm.SentencePieceProcessor()
sp.Load(TOKENIZER_PATH)  # Load() is the canonical SentencePiece API name (alias of load())

# ========================
# Count total lines up front so tqdm can show a real progress bar
# (percentage/ETA) instead of just a running count. This costs one
# extra pass over the file, which is acceptable for a one-off script.
# ========================
print("📊 正在统计文本行数（用于进度显示）...")
with open(INPUT_TEXT, 'r', encoding='utf-8') as f:
    # sum over a generator counts lines at C speed without
    # materializing the file or hand-rolling a counter loop
    total_lines = sum(1 for _ in f)
print(f"📁 共 {total_lines:,} 行文本，开始编码...")

# ========================
# Encode every non-empty line to SentencePiece token IDs and emit
# them as one continuous space-separated stream (an EOS token is
# appended per line via add_eos=True, so line boundaries survive
# in the ID stream).
# ========================
with open(INPUT_TEXT, 'r', encoding='utf-8') as src, \
     open(OUTPUT, 'w', encoding='utf-8') as dst:

    progress = tqdm(src, total=total_lines, desc="🔤 编码中", unit="行")
    for raw_line in progress:
        stripped = raw_line.strip()
        if not stripped:
            continue  # skip blank lines

        # text -> list of token IDs, with EOS appended
        token_ids = sp.encode(stripped, out_type=int, add_eos=True)
        if token_ids:  # guard against an empty encoding
            dst.write(' '.join(str(tid) for tid in token_ids) + ' ')

print(f"✅ 编码完成！Token IDs 已保存至: {OUTPUT}")