#!/usr/bin/env python3
"""
使用HuggingFace tokenizers加载MiniMind2分词器
"""

import json
import os
from typing import List, Optional

try:
    from tokenizers import Tokenizer
    # Bug fix: ``tokenizers.pre_tokenizers`` and ``tokenizers.processors``
    # both export a class named ``ByteLevel``; importing both unaliased made
    # the second import silently shadow the first. Alias them explicitly.
    from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer
    from tokenizers.processors import ByteLevel as ByteLevelProcessor
    HF_AVAILABLE = True
except ImportError:
    HF_AVAILABLE = False
    print("Warning: HuggingFace tokenizers not available, using fallback")

class MiniMindTokenizer:
    """MiniMind2 tokenizer.

    Uses the fast HuggingFace ``tokenizers`` backend when the package is
    importable and ``tokenizer.json`` loads successfully; otherwise falls
    back to a naive character/byte-level encoder built from the raw vocab
    table in ``tokenizer.json``.
    """

    def __init__(self, tokenizer_path: Optional[str] = None):
        """Initialize the tokenizer.

        Args:
            tokenizer_path: Directory containing ``tokenizer.json`` and
                ``tokenizer_config.json``. Defaults to a local MiniMind2
                checkout path.
        """
        if tokenizer_path is None:
            tokenizer_path = "/Users/sd/Desktop/mycode/myalgo/milvus/minimind/MiniMind2"

        self.tokenizer_path = tokenizer_path

        if HF_AVAILABLE:
            try:
                # Prefer the fast HuggingFace tokenizer.
                self.tokenizer = Tokenizer.from_file(os.path.join(tokenizer_path, "tokenizer.json"))
                self.use_hf = True
                print("使用HuggingFace tokenizers")
            except Exception as e:
                print(f"HuggingFace tokenizers加载失败: {e}")
                self._load_fallback()
        else:
            self._load_fallback()

        # Load the tokenizer configuration (special-token names, etc.).
        config_path = os.path.join(tokenizer_path, "tokenizer_config.json")
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = json.load(f)

        # Special tokens, with MiniMind2 defaults when absent from the config.
        self.pad_token = self.config.get("pad_token", "<|endoftext|>")
        self.unk_token = self.config.get("unk_token", "<|endoftext|>")
        self.bos_token = self.config.get("bos_token", "<|im_start|>")
        self.eos_token = self.config.get("eos_token", "<|im_end|>")

        # Resolve the special tokens to their integer ids.
        self.pad_token_id = self._get_token_id(self.pad_token)
        self.unk_token_id = self._get_token_id(self.unk_token)
        self.bos_token_id = self._get_token_id(self.bos_token)
        self.eos_token_id = self._get_token_id(self.eos_token)

        print(f"特殊token: pad={self.pad_token_id}, unk={self.unk_token_id}, bos={self.bos_token_id}, eos={self.eos_token_id}")

    def _load_fallback(self):
        """Load the raw vocab table used by the fallback encoder."""
        self.use_hf = False
        with open(os.path.join(self.tokenizer_path, "tokenizer.json"), 'r', encoding='utf-8') as f:
            tokenizer_data = json.load(f)

        self.vocab = tokenizer_data['model']['vocab']
        self.id_to_token = {idx: token for token, idx in self.vocab.items()}
        # Bug fix: the original assigned ``self.vocab_size = len(self.vocab)``
        # here, but ``vocab_size`` is a read-only @property on this class, so
        # that assignment raised AttributeError and made the fallback path
        # unusable. The property computes the size instead.
        print(f"使用fallback分词器，词汇表大小: {self.vocab_size}")

    def _get_token_id(self, token: str) -> int:
        """Return the vocabulary id for *token* (unk / 0 when unknown)."""
        if self.use_hf:
            # Bug fix: the original used ``encode(token).ids[0]``, which can
            # split a special token into sub-pieces (returning the wrong id)
            # or raise IndexError on an empty encoding. ``token_to_id``
            # resolves the token as a single vocab entry.
            token_id = self.tokenizer.token_to_id(token)
            if token_id is not None:
                return token_id
            ids = self.tokenizer.encode(token).ids
            return ids[0] if ids else 0
        return self.vocab.get(token, self.vocab.get(self.unk_token, 0))

    def encode(self, text: str, add_special_tokens: bool = True) -> List[int]:
        """Encode *text* into a list of token ids.

        Args:
            text: Input string.
            add_special_tokens: When True, wrap the text in BOS/EOS tokens
                before encoding.

        Returns:
            List of integer token ids.
        """
        if add_special_tokens:
            text = self.bos_token + text + self.eos_token

        if self.use_hf:
            return self.tokenizer.encode(text).ids

        # Fallback: character-level lookup with a byte-level second chance.
        token_ids: List[int] = []
        for char in text:
            if char in self.vocab:
                token_ids.append(self.vocab[char])
            else:
                # Try each UTF-8 byte individually. NOTE(review): chr(byte)
                # does not match the GPT-2 byte-level alphabet, so with a
                # byte-level BPE vocab most bytes will fall through to unk —
                # confirm against the actual vocab contents.
                for byte in char.encode('utf-8'):
                    byte_token = chr(byte)
                    token_ids.append(self.vocab.get(byte_token, self.unk_token_id))

        return token_ids

    def decode(self, token_ids: List[int], skip_special_tokens: bool = True) -> str:
        """Decode a list of token ids back into text.

        Args:
            token_ids: Ids to decode.
            skip_special_tokens: When True, strip pad/unk/bos/eos tokens
                from the output.

        Returns:
            The decoded (and whitespace-stripped) string.
        """
        if self.use_hf:
            return self.tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

        # Fallback: direct id -> token lookup; unknown ids become unk_token.
        tokens = [self.id_to_token.get(token_id, self.unk_token) for token_id in token_ids]
        text = ''.join(tokens)

        if skip_special_tokens:
            for special in (self.bos_token, self.eos_token, self.pad_token, self.unk_token):
                text = text.replace(special, '')

        return text.strip()

    @property
    def vocab_size(self) -> int:
        """Number of entries in the vocabulary."""
        if self.use_hf:
            return self.tokenizer.get_vocab_size()
        return len(self.vocab)

    def save_pretrained(self, save_directory: str):
        """Save the tokenizer files into *save_directory*.

        Writes ``tokenizer.json`` (HF backend) or ``vocab.json`` (fallback),
        plus ``tokenizer_config.json`` in both cases.
        """
        os.makedirs(save_directory, exist_ok=True)

        if self.use_hf:
            self.tokenizer.save(os.path.join(save_directory, "tokenizer.json"))
        else:
            vocab_data = {
                "vocab": self.vocab,
                "special_tokens": {
                    "pad_token": self.pad_token,
                    "unk_token": self.unk_token,
                    "bos_token": self.bos_token,
                    "eos_token": self.eos_token
                }
            }
            with open(os.path.join(save_directory, "vocab.json"), "w", encoding="utf-8") as f:
                json.dump(vocab_data, f, ensure_ascii=False, indent=2)

        with open(os.path.join(save_directory, "tokenizer_config.json"), "w", encoding="utf-8") as f:
            json.dump(self.config, f, ensure_ascii=False, indent=2)

    @classmethod
    def from_pretrained(cls, model_path: str, **kwargs):
        """Load a tokenizer from *model_path* (HF-style alternate constructor)."""
        return cls(model_path)

    def __call__(self, text: str, return_tensors: Optional[str] = None, truncation: bool = False,
                 max_length: Optional[int] = None, add_special_tokens: bool = True, **kwargs):
        """Tokenize *text* with a HuggingFace-compatible calling convention.

        Args:
            text: Input string.
            return_tensors: ``"pt"`` for torch tensors, otherwise plain lists.
            truncation: When True (together with *max_length*), truncate ids.
            max_length: Maximum sequence length used for truncation.
            add_special_tokens: Forwarded to :meth:`encode`.

        Returns:
            Dict with batched ``input_ids`` and ``attention_mask``.
        """
        token_ids = self.encode(text, add_special_tokens=add_special_tokens)

        if truncation and max_length and len(token_ids) > max_length:
            token_ids = token_ids[:max_length]

        if return_tensors == "pt":
            import torch
            return {
                "input_ids": torch.tensor([token_ids]),
                # Integer mask per the HF convention (the original returned
                # a float mask from a bare torch.ones call).
                "attention_mask": torch.ones(1, len(token_ids), dtype=torch.long)
            }
        # Bug fix: the attention mask is batched like input_ids — the
        # original returned a flat list here while input_ids was nested.
        return {
            "input_ids": [token_ids],
            "attention_mask": [[1] * len(token_ids)]
        }

def test_tokenizer():
    """Smoke-test encode/decode round-trips on a few sample strings."""
    tokenizer = MiniMindTokenizer()

    samples = (
        "你好",
        "今天天气很好",
        "人工智能是未来的趋势",
        "Hello world!",
        "这是一个测试。",
    )

    for sample in samples:
        print(f"\n原文: {sample}")
        ids = tokenizer.encode(sample)
        print(f"Token IDs: {ids[:20]}...")  # show at most the first 20 ids
        print(f"解码: {tokenizer.decode(ids)}")

# Run the manual smoke test when this module is executed as a script.
if __name__ == "__main__":
    test_tokenizer()
