import re
from typing import List, Union


class TokenCounter:
    """Count tokens in text, either approximately (regex) or exactly (HF tokenizer)."""

    # One token per CJK character, per non-CJK word, per punctuation mark.
    # NOTE: plain r'\b\w+\b' would swallow a whole CJK run as a single "word"
    # (e.g. "今天天气不错" -> 1 token); counting CJK per character is a far
    # closer approximation for Chinese/Japanese text.
    _TOKEN_RE = re.compile(r'[\u4e00-\u9fff]|[^\W\u4e00-\u9fff]+|[^\w\s]')

    def __init__(self, method: str = "approx"):
        """
        Initialize the token counter.

        method: "approx" - regex-based approximation (default);
                "exact"  - requires a tokenizer loaded via load_tokenizer().
        """
        self.method = method
        self.tokenizer = None  # set by load_tokenizer()

    def load_tokenizer(self, model_name: str = "gpt2") -> None:
        """Load a Hugging Face tokenizer and switch to exact counting.

        Falls back to the approximate method if `transformers` is not
        installed or the model cannot be fetched/resolved.
        """
        try:
            from transformers import AutoTokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.method = "exact"
        except ImportError:
            print("transformers not installed, falling back to approximate method")
            self.method = "approx"
        except OSError as err:
            # from_pretrained raises OSError for unknown model ids or
            # network failures; the original only caught ImportError, so
            # the advertised fallback never happened for these cases.
            print(f"could not load tokenizer '{model_name}' ({err}), "
                  "falling back to approximate method")
            self.method = "approx"

    def count_tokens(self, text: str) -> int:
        """Return the token count for *text* using the configured method."""
        if self.method == "exact" and self.tokenizer:
            return len(self.tokenizer.encode(text))
        return self._approx_token_count(text)

    def _approx_token_count(self, text: str) -> int:
        """Approximate token count: CJK chars, words, and punctuation each count as one."""
        if not text:
            return 0
        return len(self._TOKEN_RE.findall(text))

    def count_batch_tokens(self, texts: List[str]) -> List[int]:
        """Return the token count of each text in *texts*."""
        return [self.count_tokens(text) for text in texts]


# Usage example
def _demo() -> None:
    """Demonstrate approximate and exact token counting on a sample text."""
    counter = TokenCounter()

    # Approximate method (no dependencies needed)
    text = "今天天气不错"
    approx_count = counter.count_tokens(text)
    print(f"近似Token数量: {approx_count}")

    # Exact method (downloads the gpt2 tokenizer on first use)
    counter.load_tokenizer("gpt2")
    exact_count = counter.count_tokens(text)
    print(f"精确Token数量: {exact_count}")


# Guarded so that importing this module does not trigger a network
# download of the gpt2 tokenizer as a side effect.
if __name__ == "__main__":
    _demo()