"""Prometheus metrics for AI service."""

from typing import Optional
import logging
from prometheus_client import (
    Counter,
    Histogram,
    Gauge,
    Summary,
    start_http_server,
    REGISTRY
)

logger = logging.getLogger(__name__)


class PrometheusMetrics:
    """Prometheus metrics collector for AI service.

    Wraps counters, histograms, gauges, and summaries covering HTTP
    requests, LLM calls, chain executions, conversation/memory state,
    errors, retries, model availability, and streaming performance, and
    exposes helper methods to record each.
    """

    def __init__(
        self,
        service_name: str = "ai_service",
        registry=REGISTRY
    ):
        """Initialize Prometheus metrics.

        Args:
            service_name: Name of the service; used as the metric-name
                prefix for every metric created here.
            registry: Collector registry the metrics are registered with.
                Defaults to the global ``REGISTRY``. Pass a fresh
                ``CollectorRegistry`` to create more than one instance
                (e.g. in tests) without "Duplicated timeseries"
                registration errors.
        """
        self.service_name = service_name
        # Kept so the HTTP server can expose the same registry the
        # metrics were registered with.
        self.registry = registry

        # Request metrics
        self.request_count = Counter(
            f"{service_name}_requests_total",
            "Total number of requests",
            ["method", "endpoint", "status"],
            registry=registry
        )

        self.request_duration = Histogram(
            f"{service_name}_request_duration_seconds",
            "Request duration in seconds",
            ["method", "endpoint"],
            buckets=(0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0),
            registry=registry
        )

        # LLM metrics
        self.llm_request_count = Counter(
            f"{service_name}_llm_requests_total",
            "Total number of LLM requests",
            ["model", "status"],
            registry=registry
        )

        self.llm_token_count = Counter(
            f"{service_name}_llm_tokens_total",
            "Total number of tokens processed",
            ["model", "type"],  # type: prompt, completion, total
            registry=registry
        )

        self.llm_latency = Histogram(
            f"{service_name}_llm_latency_seconds",
            "LLM response latency",
            ["model"],
            buckets=(0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 30.0, 60.0),
            registry=registry
        )

        self.llm_cost = Counter(
            f"{service_name}_llm_cost_dollars",
            "Total LLM API cost in dollars",
            ["model"],
            registry=registry
        )

        # Chain metrics
        self.chain_execution_count = Counter(
            f"{service_name}_chain_executions_total",
            "Total number of chain executions",
            ["chain_type", "status"],
            registry=registry
        )

        self.chain_execution_time = Histogram(
            f"{service_name}_chain_execution_seconds",
            "Chain execution time",
            ["chain_type"],
            buckets=(0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0),
            registry=registry
        )

        # Memory metrics
        self.conversation_count = Gauge(
            f"{service_name}_active_conversations",
            "Number of active conversations",
            registry=registry
        )

        self.memory_usage = Gauge(
            f"{service_name}_memory_usage_bytes",
            "Memory usage in bytes",
            ["type"],  # type: conversation, cache, total
            registry=registry
        )

        # Error metrics
        self.error_count = Counter(
            f"{service_name}_errors_total",
            "Total number of errors",
            ["error_type", "model"],
            registry=registry
        )

        self.retry_count = Counter(
            f"{service_name}_retries_total",
            "Total number of retry attempts",
            ["reason", "model"],
            registry=registry
        )

        # Model availability
        self.model_availability = Gauge(
            f"{service_name}_model_availability",
            "Model availability (1=available, 0=unavailable)",
            ["model"],
            registry=registry
        )

        # Performance metrics
        self.time_to_first_token = Summary(
            f"{service_name}_time_to_first_token_seconds",
            "Time to first token in streaming",
            ["model"],
            registry=registry
        )

        self.tokens_per_second = Summary(
            f"{service_name}_tokens_per_second",
            "Token generation rate",
            ["model"],
            registry=registry
        )

    def record_request(
        self,
        method: str,
        endpoint: str,
        status: int,
        duration: float
    ) -> None:
        """Record HTTP request metrics.

        Args:
            method: HTTP method
            endpoint: API endpoint
            status: Response status code (stored as a string label)
            duration: Request duration in seconds
        """
        self.request_count.labels(
            method=method,
            endpoint=endpoint,
            status=str(status)
        ).inc()

        self.request_duration.labels(
            method=method,
            endpoint=endpoint
        ).observe(duration)

    def record_llm_request(
        self,
        model: str,
        success: bool,
        latency: float,
        prompt_tokens: int = 0,
        completion_tokens: int = 0,
        cost: float = 0.0
    ) -> None:
        """Record LLM request metrics.

        Latency, token counts, and cost are recorded only for successful
        requests; failures increment the request counter alone.

        Args:
            model: Model name
            success: Whether request succeeded
            latency: Response latency in seconds
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens
            cost: API cost in dollars
        """
        status = "success" if success else "failure"

        self.llm_request_count.labels(
            model=model,
            status=status
        ).inc()

        if success:
            self.llm_latency.labels(model=model).observe(latency)

            if prompt_tokens:
                self.llm_token_count.labels(
                    model=model,
                    type="prompt"
                ).inc(prompt_tokens)

            if completion_tokens:
                self.llm_token_count.labels(
                    model=model,
                    type="completion"
                ).inc(completion_tokens)

            total_tokens = prompt_tokens + completion_tokens
            if total_tokens:
                self.llm_token_count.labels(
                    model=model,
                    type="total"
                ).inc(total_tokens)

            if cost:
                self.llm_cost.labels(model=model).inc(cost)

    def record_chain_execution(
        self,
        chain_type: str,
        success: bool,
        duration: float
    ) -> None:
        """Record chain execution metrics.

        Duration is observed only for successful executions.

        Args:
            chain_type: Type of chain
            success: Whether execution succeeded
            duration: Execution duration in seconds
        """
        status = "success" if success else "failure"

        self.chain_execution_count.labels(
            chain_type=chain_type,
            status=status
        ).inc()

        if success:
            self.chain_execution_time.labels(
                chain_type=chain_type
            ).observe(duration)

    def record_error(
        self,
        error_type: str,
        model: Optional[str] = None
    ) -> None:
        """Record error occurrence.

        Args:
            error_type: Type of error
            model: Model that caused error; "unknown" label when omitted
        """
        self.error_count.labels(
            error_type=error_type,
            model=model or "unknown"
        ).inc()

    def record_retry(
        self,
        reason: str,
        model: Optional[str] = None
    ) -> None:
        """Record retry attempt.

        Args:
            reason: Reason for retry
            model: Model being retried; "unknown" label when omitted
        """
        self.retry_count.labels(
            reason=reason,
            model=model or "unknown"
        ).inc()

    def set_model_availability(
        self,
        model: str,
        available: bool
    ) -> None:
        """Set model availability status.

        Args:
            model: Model name
            available: Whether model is available (gauge set to 1.0/0.0)
        """
        self.model_availability.labels(model=model).set(
            1.0 if available else 0.0
        )

    def set_active_conversations(self, count: int) -> None:
        """Set number of active conversations.

        Args:
            count: Number of active conversations
        """
        self.conversation_count.set(count)

    def set_memory_usage(
        self,
        memory_type: str,
        bytes_used: int
    ) -> None:
        """Set memory usage.

        Args:
            memory_type: Type of memory (e.g. conversation, cache, total)
            bytes_used: Bytes used
        """
        self.memory_usage.labels(type=memory_type).set(bytes_used)

    def record_streaming_metrics(
        self,
        model: str,
        time_to_first: float,
        tokens_per_second: float
    ) -> None:
        """Record streaming performance metrics.

        Args:
            model: Model name
            time_to_first: Time to first token in seconds
            tokens_per_second: Token generation rate
        """
        self.time_to_first_token.labels(model=model).observe(time_to_first)
        self.tokens_per_second.labels(model=model).observe(tokens_per_second)

    def start_http_server_async(self, port: int = 9091) -> None:
        """Start Prometheus HTTP server.

        Serves the registry this instance registered its metrics with,
        so a custom registry is exposed correctly. Despite the name,
        prometheus_client's server runs in a daemon thread, so this call
        returns immediately. Failure to bind is logged, not raised.

        Args:
            port: Port to serve metrics
        """
        try:
            start_http_server(port, registry=self.registry)
            logger.info(f"Prometheus metrics server started on port {port}")
        except Exception as e:
            logger.error(f"Failed to start metrics server: {e}")