"""
BPE分词器实现
- 训练语料生成词汇表（支持自动调整大小）
- Encode/Decode文本
"""
import json
import os
from typing import Dict, List, Optional

import sentencepiece as spm


class JiaboTokenizer:
    """Lightweight BPE tokenizer (backed by SentencePiece).

    Loads a JSON ``token -> id`` vocabulary from disk and provides
    encode/decode plus a classmethod that trains a new vocabulary from a
    raw-text corpus via SentencePiece.
    """

    def __init__(self, vocab_path: str):
        """Initialize the tokenizer from a JSON vocabulary file.

        Args:
            vocab_path: Path to a JSON file mapping token string -> integer id.

        Raises:
            FileNotFoundError: If ``vocab_path`` does not exist.
        """
        self.vocab_path = vocab_path
        self.vocab: Dict[str, int] = {}
        self.inv_vocab: Dict[int, str] = {}

        # Special tokens; train_from_corpus assigns them ids 0-3.
        self.pad_token = "[PAD]"
        self.bos_token = "[BOS]"
        self.eos_token = "[EOS]"
        self.unk_token = "[UNK]"

        self.load_vocab()

    def load_vocab(self):
        """Load the token->id vocabulary from JSON and rebuild the inverse map.

        Raises:
            FileNotFoundError: If the vocabulary file does not exist.
        """
        if not os.path.exists(self.vocab_path):
            raise FileNotFoundError(f"词汇表文件不存在: {self.vocab_path}")

        with open(self.vocab_path, "r", encoding="utf-8") as f:
            self.vocab = json.load(f)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        print(f"✅ 词汇表加载成功 | 词汇量: {len(self.vocab):,}")

    @classmethod
    def train_from_corpus(cls, corpus_path: str, output_path: str, vocab_size: int = 32000):
        """Train a new BPE vocabulary from a text corpus and save it as JSON.

        The requested ``vocab_size`` is automatically shrunk for small
        corpora so SentencePiece training does not fail.

        Args:
            corpus_path: Path to a UTF-8 plain-text training corpus.
            output_path: Destination for the JSON vocabulary file.
            vocab_size: Desired vocabulary size (upper bound).

        Returns:
            A tokenizer instance loaded from the freshly written vocabulary.
        """
        print(f"📚 正在读取语料: {corpus_path}")

        # Read the corpus once up front to estimate a feasible vocab size.
        with open(corpus_path, "r", encoding="utf-8") as f:
            text = f.read()

        # Heuristic cap: at most 50x the unique-character count and at most
        # one tenth of the corpus length.
        unique_chars = len(set(text))
        max_vocab_size = min(vocab_size, unique_chars * 50, len(text) // 10)

        if max_vocab_size < vocab_size:
            print(f"⚠️  语料太小，自动调整词汇表: {vocab_size} -> {max_vocab_size}")
            vocab_size = max_vocab_size

        # Keep room for special tokens and common words.
        # NOTE(review): this floor can raise vocab_size back above the cap
        # computed above; on a tiny corpus SentencePiece may still reject
        # the requested size — confirm intended behavior.
        vocab_size = max(vocab_size, 1000)

        print(f"📝 训练BPE模型 | 目标词汇量: {vocab_size}")

        # Train the SentencePiece BPE model with fixed special-token ids.
        spm.SentencePieceTrainer.train(
            input=corpus_path,
            model_prefix="temp_sp_model",
            vocab_size=vocab_size,
            character_coverage=0.9995,
            model_type="bpe",
            pad_id=0, bos_id=1, eos_id=2, unk_id=3,
        )

        try:
            # Convert the trained model into a flat token -> id JSON map.
            sp = spm.SentencePieceProcessor(model_file="temp_sp_model.model")
            vocab = {sp.id_to_piece(i): i for i in range(sp.get_piece_size())}

            # Replace SentencePiece's native control pieces (e.g. "<pad>")
            # at ids 0-3 with our special tokens. Previously the special
            # tokens were added on top, leaving two tokens mapped to the
            # same id; popping the native piece keeps the map a bijection.
            special_tokens = ["[PAD]", "[BOS]", "[EOS]", "[UNK]"]
            for i, token in enumerate(special_tokens):
                vocab.pop(sp.id_to_piece(i), None)
                vocab[token] = i

            # Save the vocabulary. Guard against output_path having no
            # directory component: os.makedirs("") raises FileNotFoundError.
            out_dir = os.path.dirname(output_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)
            with open(output_path, "w", encoding="utf-8") as f:
                json.dump(vocab, f, ensure_ascii=False, indent=2)
        finally:
            # Always remove SentencePiece's temporary artifacts, even when
            # conversion or saving fails.
            for file in ["temp_sp_model.model", "temp_sp_model.vocab"]:
                if os.path.exists(file):
                    os.remove(file)

        print(f"✅ 词汇表已保存至 {output_path} | 实际词汇量: {len(vocab):,}")
        return cls(output_path)

    def encode(self, text: str, max_length: Optional[int] = None) -> List[int]:
        """Convert text to a list of token ids.

        Simplified character-level lookup (a faithful implementation would
        run the SentencePiece model itself). Unknown characters map to the
        [UNK] id, falling back to 3 if [UNK] is absent from the vocab.

        Args:
            text: Input string.
            max_length: If given, truncate the id list to this length.

        Returns:
            List of token ids.
        """
        tokens = list(text)
        ids = [self.vocab.get(token, self.vocab.get(self.unk_token, 3)) for token in tokens]

        if max_length:
            ids = ids[:max_length]
        return ids

    def decode(self, ids: List[int]) -> str:
        """Convert token ids back to text.

        Unknown ids render as the [UNK] token; SentencePiece's "▁" word
        boundary marker is restored to a plain space and the result is
        stripped of surrounding whitespace.
        """
        tokens = [self.inv_vocab.get(i, self.unk_token) for i in ids]
        return "".join(tokens).replace("▁", " ").strip()

    def __len__(self):
        """Vocabulary size."""
        return len(self.vocab)
