import re
from typing import Tuple, Optional

def count_tokens_simple(text: str) -> int:
    """
    Estimate the number of tokens in *text* with a simple heuristic.

    Chinese characters count as ~1 token per 2.5 characters and all other
    characters as ~1 token per 4 characters (a rough average for
    English-like text).

    Args:
        text: The text to measure. May be empty.

    Returns:
        Estimated token count: 0 for empty input, at least 1 for any
        non-empty input.
    """
    if not text:
        return 0

    # Characters in the CJK Unified Ideographs range are counted as Chinese.
    chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', text))
    total_chars = len(text)
    # Everything outside that range is treated as English-like.
    english_chars = total_chars - chinese_chars

    estimated_tokens = int(chinese_chars / 2.5 + english_chars / 4)
    # Lower bound: never estimate below 1 token per 6 characters, and never
    # return 0 for non-empty text (the original formula rounded short
    # strings such as "你好" down to 0 tokens).
    return max(estimated_tokens, total_chars // 6, 1)

def check_prompt_limit(prompt: str, model: str = "deepseek", max_tokens: Optional[int] = None) -> Tuple[bool, dict]:
    """
    Check whether a prompt fits within a model's token limit.

    Args:
        prompt: The prompt text to check.
        model: Model name (e.g. "Qwen2.5", "Qwen2.5-32B", "deepseek");
            unknown names fall back to the "default" limit.
        max_tokens: Optional explicit token limit overriding the per-model
            table. Only honoured when it is a positive integer.

    Returns:
        A ``(within_limit, info)`` tuple; ``info`` contains
        ``token_count``, ``limit``, ``within_limit``, ``usage_percent``,
        ``char_count`` and ``remaining_tokens`` (0 when over the limit).
    """
    # Known per-model context limits; "default" covers unknown models.
    model_limits = {
        "Qwen2.5": 64000,
        "Qwen2.5-32B": 64000,
        "deepseek": 64000,
        "default": 60000
    }

    # Validate the override explicitly: the original truthiness test
    # silently accepted a negative limit, which would make the percentage
    # math below meaningless (and 0 would divide by zero).
    if max_tokens is not None and max_tokens > 0:
        limit = max_tokens
    else:
        limit = model_limits.get(model, model_limits["default"])

    # Estimated token usage of the prompt.
    token_count = count_tokens_simple(prompt)

    within_limit = token_count <= limit
    usage_percent = round((token_count / limit) * 100, 1)

    return within_limit, {
        "token_count": token_count,
        "limit": limit,
        "within_limit": within_limit,
        "usage_percent": usage_percent,
        "char_count": len(prompt),
        "remaining_tokens": limit - token_count if within_limit else 0
    }

def optimize_prompt_simple(prompt: str, model: str = "gpt-4o", target_percent: float = 80.0) -> str:
    """
    Shorten a prompt by truncation when it exceeds the target token budget.

    Args:
        prompt: Original prompt text.
        model: Model name used to look up the token limit.
        target_percent: Target usage as a percentage of the model limit.

    Returns:
        The prompt unchanged when it already fits the budget; otherwise a
        truncated copy, cut at a sentence boundary when one is close
        enough to the computed cut point.
    """
    within_limit, info = check_prompt_limit(prompt, model)

    if within_limit and info["usage_percent"] <= target_percent:
        return prompt

    # Token budget corresponding to the target percentage.
    target_tokens = int(info["limit"] * (target_percent / 100))

    if info["token_count"] > target_tokens:
        # Proportional cut with a 10% safety margin, since the token
        # estimate is only approximate.
        ratio = target_tokens / info["token_count"]
        target_length = int(len(prompt) * ratio * 0.9)

        truncated = prompt[:target_length]

        # Prefer ending on a sentence boundary. Fix: take the LATEST
        # boundary across ALL delimiters, instead of returning on the
        # first delimiter type found in a fixed order (which could cut at
        # an early '。' even when a much later '.' exists).
        best_pos = max(
            truncated.rfind(delimiter)
            for delimiter in ['。', '！', '？', '.', '!', '?', '\n']
        )
        if best_pos > target_length * 0.8:  # boundary close enough to the cut
            return truncated[:best_pos + 1]

        return truncated

    return prompt

# 便捷函数
def is_prompt_too_long(prompt: str, model: str = "gpt-4o") -> bool:
    """Return True when the prompt exceeds the model's token limit."""
    fits, _info = check_prompt_limit(prompt, model)
    return not fits

def get_token_count(text: str) -> int:
    """Return the estimated token count for *text*.

    Thin alias around :func:`count_tokens_simple`.
    """
    return count_tokens_simple(text)

def get_prompt_info(prompt: str, model: str = "gpt-4o") -> str:
    """Build a short human-readable report about a prompt's token usage."""
    within_limit, info = check_prompt_limit(prompt, model)

    status = "✓ 可用" if within_limit else "✗ 超限"

    # Assemble the report line by line; output is identical to the
    # original multi-line f-string.
    report_lines = [
        "提示词检查结果:",
        f"字符数: {info['char_count']}",
        f"Token数: {info['token_count']}",
        f"模型限制: {info['limit']}",
        f"使用率: {info['usage_percent']}%",
        f"状态: {status}",
        f"剩余空间: {info['remaining_tokens']} tokens",
    ]
    return "\n".join(report_lines)