import re
from typing import List, Tuple, Dict, Optional
from wenet.text.base_tokenizer import BaseTokenizer

class MixedTokenizer(BaseTokenizer):
    """Tokenizer for mixed Chinese/English vocabularies.

    Splits text into: single-character tokens for CJK characters,
    whole contiguous runs for ASCII letters/digits, whole tokens for
    configured non-linguistic symbols (e.g. ``<noise>``), and single
    tokens for everything else (punctuation, whitespace, ...).
    """

    def __init__(
        self,
        symbol_table_path: str,
        non_lang_syms_path: Optional[str] = None,
        split_with_space: bool = False,
        connect_symbol: str = '',
        **kwargs
    ):
        """
        Args:
            symbol_table_path: path to a text file with one
                ``<symbol> <id>`` pair per line.
            non_lang_syms_path: optional path to a file listing
                non-linguistic symbols, one per line.
            split_with_space: if True, tokenize on whitespace only and
                skip all character-class logic.
            connect_symbol: separator inserted between tokens by
                ``tokens2text``.
            **kwargs: ignored; accepted for config compatibility.
        """
        super().__init__()
        self.split_with_space = split_with_space
        self.connect_symbol = connect_symbol

        # Symbol -> id mapping loaded from disk.
        self.symbol_table = self._load_symbol_table(symbol_table_path)
        # Reverse mapping, built once here instead of on every
        # ids2tokens() call.  If duplicate ids exist, the last symbol in
        # file order wins (same as rebuilding the dict per call).
        self._id2symbol = {v: k for k, v in self.symbol_table.items()}

        # Non-linguistic symbols (empty list when no path is given).
        self.non_lang_symbols = self._load_non_lang_symbols(non_lang_syms_path)

        # Pre-compiled patterns used by text2tokens.
        self.english_pattern = re.compile(r'[a-zA-Z]+')
        self.number_pattern = re.compile(r'[0-9]+')
        self.chinese_pattern = re.compile(r'[\u4e00-\u9fff]')  # CJK Unified Ideographs
        # Combined pattern: one match() consumes a whole letter/digit run.
        self.alnum_pattern = re.compile(r'[a-zA-Z0-9]+')

    def _load_symbol_table(self, symbol_table_path: str) -> Dict[str, int]:
        """Load the symbol table file; return a symbol -> id mapping.

        Lines with fewer than two whitespace-separated fields are
        silently skipped.
        """
        symbol_table: Dict[str, int] = {}
        with open(symbol_table_path, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    symbol_table[parts[0]] = int(parts[1])
        return symbol_table

    def _load_non_lang_symbols(self, non_lang_syms_path: Optional[str]) -> List[str]:
        """Load the non-linguistic symbol list; empty list when no path."""
        if non_lang_syms_path is None:
            return []

        with open(non_lang_syms_path, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f if line.strip()]

    def text2tokens(self, text: str) -> List[str]:
        """Convert a text string into a list of tokens."""
        # Whitespace-only mode: no character-class handling at all.
        if self.split_with_space:
            return text.strip().split()

        tokens: List[str] = []
        i = 0
        text_length = len(text)

        while i < text_length:
            # Non-linguistic symbols take priority and are kept whole
            # (first match in list order wins, no longest-match).
            # BUGFIX: the previous inner-loop ``continue`` only advanced
            # the symbol scan, so a symbol match at end-of-string fell
            # through to ``text[i]`` and raised IndexError; matching
            # first and then continuing the outer loop fixes that.
            matched = next(
                (s for s in self.non_lang_symbols if text.startswith(s, i)),
                None,
            )
            if matched is not None:
                tokens.append(matched)
                i += len(matched)
                continue

            char = text[i]

            # Chinese characters become single-character tokens.
            if self.chinese_pattern.match(char):
                tokens.append(char)
                i += 1
                continue

            # ASCII letters/digits: emit the whole contiguous run as one
            # token (one regex match instead of a per-character loop).
            run = self.alnum_pattern.match(text, i)
            if run is not None:
                tokens.append(run.group(0))
                i = run.end()
                continue

            # Anything else (punctuation, whitespace, ...) is one token.
            tokens.append(char)
            i += 1

        return tokens

    def tokens2text(self, tokens: List[str]) -> str:
        """Join tokens back into text using the connect symbol."""
        return self.connect_symbol.join(tokens)

    def tokens2ids(self, tokens: List[str]) -> List[int]:
        """Map tokens to ids; unknown tokens map to ``<unk>``'s id,
        falling back to 1 when ``<unk>`` itself is absent."""
        unk_id = self.symbol_table.get('<unk>', 1)
        return [self.symbol_table.get(token, unk_id) for token in tokens]

    def ids2tokens(self, ids: List[int]) -> List[str]:
        """Map ids back to tokens; unknown ids map to ``'<unk>'``."""
        return [self._id2symbol.get(idx, '<unk>') for idx in ids]

    def text2ids(self, text: str) -> List[int]:
        """Convert text straight to an id list (tokenize, then map)."""
        return self.tokens2ids(self.text2tokens(text))

    def ids2text(self, ids: List[int]) -> str:
        """Convert an id list straight back to text (unmap, then join)."""
        return self.tokens2text(self.ids2tokens(ids))