"""Token counting and cost estimation utilities."""

from typing import Dict, Any, Optional
import tiktoken
from dataclasses import dataclass
from datetime import datetime, timedelta
from collections import defaultdict


@dataclass
class TokenUsage:
    """Token usage record for a single request or a running per-model total.

    When ``total_tokens`` is omitted (or 0) it is derived from
    ``prompt_tokens + completion_tokens``; when ``timestamp`` is omitted it
    is filled in at construction time.
    """
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    estimated_cost: float = 0.0  # USD
    model: str = ""
    # Naive UTC timestamp; deliberately naive so it stays comparable with
    # the naive datetime.utcnow() cutoffs used elsewhere in this module.
    timestamp: Optional[datetime] = None

    def __post_init__(self):
        if self.timestamp is None:
            self.timestamp = datetime.utcnow()
        if not self.total_tokens:
            # Caller supplied only the components — derive the total.
            self.total_tokens = self.prompt_tokens + self.completion_tokens


class TokenCounter:
    """Counts tokens and tracks usage across models."""
    
    # Model to tokenizer mapping
    TOKENIZER_MAP = {
        "gpt-4": "cl100k_base",
        "gpt-3.5-turbo": "cl100k_base",
        "claude-3": "claude",  # Custom estimation
        "qwen": "qwen",  # Custom estimation
        "ernie": "ernie"  # Custom estimation
    }
    
    # Pricing per model (per 1K tokens)
    PRICING = {
        "gpt-4": {"prompt": 0.03, "completion": 0.06},
        "gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
        "gpt-3.5-turbo": {"prompt": 0.0015, "completion": 0.002},
        "claude-3-opus": {"prompt": 0.015, "completion": 0.075},
        "claude-3-sonnet": {"prompt": 0.003, "completion": 0.015},
        "claude-3-haiku": {"prompt": 0.00025, "completion": 0.00125},
        "qwen-max": {"prompt": 0.003, "completion": 0.009},  # CNY converted to USD
        "qwen-plus": {"prompt": 0.0006, "completion": 0.0018},
        "qwen-turbo": {"prompt": 0.0003, "completion": 0.0009},
        "ernie-bot-4": {"prompt": 0.0017, "completion": 0.0017},
        "ernie-bot": {"prompt": 0.0006, "completion": 0.0006}
    }
    
    def __init__(self):
        """Initialize token counter."""
        self._tokenizers = {}
        self._usage_history = []
        self._usage_by_model = defaultdict(lambda: TokenUsage())
        self._quotas = {}
        self._init_tokenizers()
    
    def _init_tokenizers(self) -> None:
        """Initialize tokenizer instances."""
        # OpenAI tokenizers
        try:
            self._tokenizers["cl100k_base"] = tiktoken.get_encoding("cl100k_base")
        except Exception:
            pass
    
    def count_tokens(self, text: str, model: str) -> int:
        """Count tokens for text using model-specific tokenizer.
        
        Args:
            text: Input text
            model: Model name
            
        Returns:
            Token count
        """
        # Determine tokenizer type
        tokenizer_type = None
        for model_prefix, t_type in self.TOKENIZER_MAP.items():
            if model_prefix in model.lower():
                tokenizer_type = t_type
                break
        
        if tokenizer_type == "cl100k_base" and tokenizer_type in self._tokenizers:
            # Use tiktoken for OpenAI models
            return len(self._tokenizers[tokenizer_type].encode(text))
        elif tokenizer_type == "claude":
            # Claude estimation: ~1 token per 4 characters
            return len(text) // 4
        elif tokenizer_type == "qwen" or tokenizer_type == "ernie":
            # Chinese models: 1 Chinese char ≈ 1 token, 1 English word ≈ 1.5 tokens
            chinese_chars = sum(1 for c in text if '\u4e00' <= c <= '\u9fff')
            english_words = len([w for w in text.split() if not any('\u4e00' <= c <= '\u9fff' for c in w)])
            return chinese_chars + int(english_words * 1.5)
        else:
            # Default estimation: ~1 token per 4 characters
            return len(text) // 4
    
    def estimate_cost(
        self,
        prompt_tokens: int,
        completion_tokens: int,
        model: str
    ) -> float:
        """Estimate cost for token usage.
        
        Args:
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens
            model: Model name
            
        Returns:
            Estimated cost in USD
        """
        # Find pricing for model
        pricing = None
        for model_key in self.PRICING:
            if model_key in model.lower():
                pricing = self.PRICING[model_key]
                break
        
        if not pricing:
            # Default to GPT-3.5 pricing if unknown
            pricing = self.PRICING["gpt-3.5-turbo"]
        
        prompt_cost = (prompt_tokens / 1000) * pricing["prompt"]
        completion_cost = (completion_tokens / 1000) * pricing["completion"]
        
        return prompt_cost + completion_cost
    
    def track_usage(
        self,
        prompt_tokens: int,
        completion_tokens: int,
        model: str
    ) -> TokenUsage:
        """Track token usage.
        
        Args:
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens
            model: Model name
            
        Returns:
            TokenUsage record
        """
        cost = self.estimate_cost(prompt_tokens, completion_tokens, model)
        
        usage = TokenUsage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
            estimated_cost=cost,
            model=model
        )
        
        # Add to history
        self._usage_history.append(usage)
        
        # Update model totals
        model_usage = self._usage_by_model[model]
        model_usage.prompt_tokens += prompt_tokens
        model_usage.completion_tokens += completion_tokens
        model_usage.total_tokens += prompt_tokens + completion_tokens
        model_usage.estimated_cost += cost
        
        # Keep only last 1000 records
        if len(self._usage_history) > 1000:
            self._usage_history = self._usage_history[-1000:]
        
        # Check quotas
        self._check_quotas(model, usage)
        
        return usage
    
    def set_quota(self, model: str, daily_tokens: int = None, daily_cost: float = None) -> None:
        """Set usage quota for a model.
        
        Args:
            model: Model name
            daily_tokens: Daily token limit
            daily_cost: Daily cost limit in USD
        """
        self._quotas[model] = {
            "daily_tokens": daily_tokens,
            "daily_cost": daily_cost
        }
    
    def _check_quotas(self, model: str, usage: TokenUsage) -> None:
        """Check if usage exceeds quotas.
        
        Args:
            model: Model name
            usage: Current usage
        """
        if model not in self._quotas:
            return
        
        quota = self._quotas[model]
        cutoff = datetime.utcnow() - timedelta(days=1)
        
        # Get daily usage
        daily_usage = [
            u for u in self._usage_history
            if u.model == model and u.timestamp > cutoff
        ]
        
        daily_tokens = sum(u.total_tokens for u in daily_usage)
        daily_cost = sum(u.estimated_cost for u in daily_usage)
        
        # Check limits
        if quota["daily_tokens"] and daily_tokens > quota["daily_tokens"]:
            raise Exception(f"Daily token quota exceeded for {model}: {daily_tokens}/{quota['daily_tokens']}")
        
        if quota["daily_cost"] and daily_cost > quota["daily_cost"]:
            raise Exception(f"Daily cost quota exceeded for {model}: ${daily_cost:.2f}/${quota['daily_cost']:.2f}")
    
    def get_usage_report(self, model: str = None, days: int = 1) -> Dict[str, Any]:
        """Get usage report.
        
        Args:
            model: Optional specific model
            days: Number of days to report
            
        Returns:
            Usage report dictionary
        """
        cutoff = datetime.utcnow() - timedelta(days=days)
        
        if model:
            history = [u for u in self._usage_history if u.model == model and u.timestamp > cutoff]
        else:
            history = [u for u in self._usage_history if u.timestamp > cutoff]
        
        if not history:
            return {
                "total_tokens": 0,
                "total_cost": 0,
                "model_breakdown": {}
            }
        
        # Calculate totals
        total_tokens = sum(u.total_tokens for u in history)
        total_cost = sum(u.estimated_cost for u in history)
        
        # Model breakdown
        model_breakdown = defaultdict(lambda: {"tokens": 0, "cost": 0, "requests": 0})
        for usage in history:
            model_breakdown[usage.model]["tokens"] += usage.total_tokens
            model_breakdown[usage.model]["cost"] += usage.estimated_cost
            model_breakdown[usage.model]["requests"] += 1
        
        return {
            "period_days": days,
            "total_tokens": total_tokens,
            "total_cost": round(total_cost, 4),
            "model_breakdown": dict(model_breakdown),
            "hourly_distribution": self._get_hourly_distribution(history)
        }
    
    def _get_hourly_distribution(self, history: list) -> Dict[int, int]:
        """Get hourly distribution of usage.
        
        Args:
            history: Usage history
            
        Returns:
            Hour to token count mapping
        """
        distribution = defaultdict(int)
        for usage in history:
            hour = usage.timestamp.hour
            distribution[hour] += usage.total_tokens
        return dict(distribution)