#!/usr/bin/env python3
"""
MiniMind2分词器 - 使用原始词汇表
"""

import json
import os
from typing import List, Dict, Optional
import re

class MiniMindTokenizer:
    """Byte-level BPE tokenizer backed by the original MiniMind2 vocabulary.

    Loads a Hugging-Face-style ``tokenizer.json`` (``{"model": {"vocab",
    "merges"}}``) and implements greedy BPE merging over the UTF-8 bytes of
    the input text.
    """

    def __init__(self, vocab_path: Optional[str] = None):
        """Load the vocabulary and merge rules.

        Args:
            vocab_path: Path to a ``tokenizer.json``-style file. Defaults to
                the bundled MiniMind2 tokenizer path.

        Raises:
            KeyError: If the file lacks the expected ``model`` layout or any
                of the special tokens is missing from the vocab.
        """
        if vocab_path is None:
            vocab_path = "/Users/sd/Desktop/mycode/myalgo/milvus/minimind/MiniMind2/tokenizer.json"

        # Load the vocabulary (HF tokenizer.json layout).
        with open(vocab_path, 'r', encoding='utf-8') as f:
            tokenizer_data = json.load(f)

        self.vocab: Dict[str, int] = tokenizer_data['model']['vocab']
        self.id_to_token: Dict[int, str] = {idx: token for token, idx in self.vocab.items()}
        self.vocab_size = len(self.vocab)

        # Special tokens; pad and unk deliberately share one token.
        self.pad_token = "<|endoftext|>"
        self.unk_token = "<|endoftext|>"
        self.bos_token = "<|im_start|>"
        self.eos_token = "<|im_end|>"

        # Resolve special-token ids up front (KeyError here means a bad vocab).
        self.pad_token_id = self.vocab[self.pad_token]
        self.unk_token_id = self.vocab[self.unk_token]
        self.bos_token_id = self.vocab[self.bos_token]
        self.eos_token_id = self.vocab[self.eos_token]

        # Build the BPE merge-priority table.
        self.merges = tokenizer_data['model']['merges']
        self._build_merge_rules()

        print(f"词汇表大小: {self.vocab_size}")
        print(f"特殊token: pad={self.pad_token_id}, unk={self.unk_token_id}, bos={self.bos_token_id}, eos={self.eos_token_id}")

    def _build_merge_rules(self):
        """Map each merge pair to its priority (lower index = merge earlier).

        Accepts both merge encodings seen in tokenizer.json files:
        a two-element list ``["a", "b"]`` or a space-separated string ``"a b"``.
        """
        self.merge_rules: Dict[tuple, int] = {}
        for i, merge in enumerate(self.merges):
            if isinstance(merge, list) and len(merge) == 2:
                self.merge_rules[(merge[0], merge[1])] = i
            elif isinstance(merge, str):
                parts = merge.split()
                if len(parts) == 2:
                    self.merge_rules[(parts[0], parts[1])] = i

    def _bpe_encode(self, text: str) -> List[str]:
        """Greedily apply BPE merges over the UTF-8 bytes of *text*.

        Returns the list of merged symbol strings (empty list for empty
        input, which previously crashed in ``_get_pairs``).

        NOTE(review): bytes are mapped with plain ``chr(b)`` rather than the
        GPT-2 ``bytes_to_unicode`` table; confirm this matches how byte
        tokens are spelled in the MiniMind2 vocab.
        """
        if not text:
            return []

        # One symbol per UTF-8 byte to start with.
        chars = [chr(b) for b in text.encode('utf-8')]

        # Repeatedly merge the highest-priority adjacent pair until none apply.
        while True:
            pairs = self._get_pairs(chars)
            if not pairs:
                break

            # Lowest merge index wins; unknown pairs rank as +inf.
            bigram = min(pairs, key=lambda pair: self.merge_rules.get(pair, float('inf')))
            if bigram not in self.merge_rules:
                break

            # Rebuild the symbol list with every occurrence of the pair fused.
            new_chars = []
            i = 0
            while i < len(chars):
                if i < len(chars) - 1 and chars[i] == bigram[0] and chars[i + 1] == bigram[1]:
                    new_chars.append(bigram[0] + bigram[1])
                    i += 2
                else:
                    new_chars.append(chars[i])
                    i += 1
            chars = new_chars

        return chars

    def _get_pairs(self, word: List[str]) -> set:
        """Return the set of adjacent symbol pairs in *word*.

        Safe for words of length 0 or 1 (returns an empty set); the previous
        implementation indexed ``word[0]`` unconditionally and raised
        ``IndexError`` on empty input.
        """
        return set(zip(word, word[1:]))

    def encode(self, text: str, add_special_tokens: bool = True) -> List[int]:
        """Encode *text* into a list of token ids.

        Special tokens are appended as ids directly rather than concatenated
        into the text: running e.g. ``<|im_start|>`` through byte-level BPE
        would split it into pieces absent from the vocab and map them to unk.
        """
        token_ids = [self.vocab.get(token, self.unk_token_id)
                     for token in self._bpe_encode(text)]

        if add_special_tokens:
            token_ids = [self.bos_token_id] + token_ids + [self.eos_token_id]

        return token_ids

    def decode(self, token_ids: List[int]) -> str:
        """Decode token ids back into text.

        Unknown ids map to the unk token; bos/eos markers are stripped and
        the GPT-2 space marker ``Ġ`` is turned back into a space.
        """
        tokens = [self.id_to_token.get(token_id, self.unk_token)
                  for token_id in token_ids]

        text = ''.join(tokens)

        # Strip the sequence markers added by encode().
        text = text.replace(self.bos_token, '')
        text = text.replace(self.eos_token, '')

        # GPT-2-style leading-space marker.
        text = text.replace('Ġ', ' ')

        # Symbols are byte values stored as chars; round-trip latin-1 -> utf-8
        # to reassemble multi-byte characters.
        try:
            decoded_text = text.encode('latin-1').decode('utf-8')
            return decoded_text.strip()
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Fall back to the raw symbol string when the bytes are not
            # valid UTF-8 (or a symbol is outside latin-1).
            return text.strip()

    def save_pretrained(self, save_directory: str):
        """Persist the tokenizer so that ``from_pretrained`` can reload it.

        The vocab file uses the same ``{"model": {"vocab", "merges"}}`` layout
        that ``__init__`` reads; the previous flat layout made the
        save/load round-trip fail with ``KeyError: 'model'``.
        """
        os.makedirs(save_directory, exist_ok=True)

        vocab_data = {
            "model": {
                "vocab": self.vocab,
                "merges": self.merges
            },
            "special_tokens": {
                "pad_token": self.pad_token,
                "unk_token": self.unk_token,
                "bos_token": self.bos_token,
                "eos_token": self.eos_token
            }
        }

        with open(os.path.join(save_directory, "vocab.json"), "w", encoding="utf-8") as f:
            json.dump(vocab_data, f, ensure_ascii=False, indent=2)

        # Lightweight config mirroring the special-token setup.
        config = {
            "vocab_size": self.vocab_size,
            "pad_token": self.pad_token,
            "unk_token": self.unk_token,
            "bos_token": self.bos_token,
            "eos_token": self.eos_token
        }

        with open(os.path.join(save_directory, "tokenizer_config.json"), "w", encoding="utf-8") as f:
            json.dump(config, f, ensure_ascii=False, indent=2)

    @classmethod
    def from_pretrained(cls, model_path: str, **kwargs):
        """Load a tokenizer from *model_path* if a vocab file exists there.

        Falls back to the default constructor (bundled vocab path) when
        ``vocab.json`` is absent.
        """
        vocab_path = os.path.join(model_path, "vocab.json")
        if os.path.exists(vocab_path):
            return cls(vocab_path)
        else:
            return cls(**kwargs)

def test_tokenizer():
    """Smoke-test the tokenizer by round-tripping a few sample strings."""
    tokenizer = MiniMindTokenizer()

    samples = (
        "你好",
        "今天天气很好",
        "人工智能是未来的趋势",
        "Hello world!",
        "这是一个测试。",
    )

    for sample in samples:
        print(f"\n原文: {sample}")
        ids = tokenizer.encode(sample)
        print(f"Token IDs: {ids[:20]}...")  # show at most the first 20 ids
        print(f"解码: {tokenizer.decode(ids)}")

# Run the smoke test only when executed as a script (not on import).
if __name__ == "__main__":
    test_tokenizer()
