"""Metrics callback for monitoring LangChain operations."""

import logging
import time
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import LLMResult

from ..metrics.prometheus import PrometheusMetrics

logger = logging.getLogger(__name__)


class MetricsCallback(AsyncCallbackHandler):
    """Async callback handler that records LLM and chain metrics.

    Records per-request latency, token usage, and a simplified cost
    estimate for LLM calls, plus execution duration for chains, into a
    :class:`PrometheusMetrics` instance.

    NOTE(review): timing state is stored on the handler instance, so
    overlapping runs through the *same* instance will clobber each
    other's start times. Use one handler per run (or key state by the
    ``run_id`` kwarg) if calls can overlap.
    """

    # Simplified USD pricing per 1K tokens as (prompt_rate, completion_rate).
    _GPT4_RATES = (0.03, 0.06)
    _GPT35_RATES = (0.0015, 0.002)

    def __init__(self, metrics: Optional[PrometheusMetrics] = None):
        """Initialize metrics callback.

        Args:
            metrics: PrometheusMetrics instance; a fresh one is created
                when omitted.
        """
        self.metrics = metrics or PrometheusMetrics()
        # Monotonic timestamps in seconds; None means "no call in flight".
        # time.monotonic() is immune to wall-clock jumps (NTP, DST), unlike
        # the deprecated datetime.utcnow() the durations were based on before.
        self.start_time: Optional[float] = None
        self.model_name: Optional[str] = None
        self.chain_start_time: Optional[float] = None
        self.chain_type: Optional[str] = None

    def _estimate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
        """Return the estimated USD cost of a call to the current model.

        Simplified pricing: only GPT-family models are costed (gpt-4 vs.
        everything-else rates); unknown models return 0.0.
        """
        # Case-insensitive throughout so "GPT-4" is billed at gpt-4 rates.
        name = (self.model_name or "").lower()
        if "gpt" not in name:
            return 0.0
        prompt_rate, completion_rate = (
            self._GPT4_RATES if "gpt-4" in name else self._GPT35_RATES
        )
        return (prompt_tokens * prompt_rate + completion_tokens * completion_rate) / 1000

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        **kwargs: Any
    ):
        """Called when LLM starts.

        Args:
            serialized: Serialized LLM
            prompts: List of prompts
            **kwargs: Additional arguments
        """
        self.start_time = time.monotonic()
        self.model_name = serialized.get("name", "unknown")

    async def on_llm_end(
        self,
        response: LLMResult,
        **kwargs: Any
    ):
        """Called when LLM completes.

        Args:
            response: LLM response
            **kwargs: Additional arguments
        """
        # An end without a matching start (or after an error reset) is a no-op.
        if self.start_time is None:
            return

        latency = time.monotonic() - self.start_time

        prompt_tokens = 0
        completion_tokens = 0
        cost = 0.0

        if response.llm_output:
            token_usage = response.llm_output.get("token_usage", {})
            prompt_tokens = token_usage.get("prompt_tokens", 0)
            completion_tokens = token_usage.get("completion_tokens", 0)
            cost = self._estimate_cost(prompt_tokens, completion_tokens)

        self.metrics.record_llm_request(
            model=self.model_name or "unknown",
            success=True,
            latency=latency,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            cost=cost
        )

        # Reset per-call state so a stray later event cannot reuse it.
        self.start_time = None
        self.model_name = None

    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any
    ):
        """Called when LLM encounters error.

        Args:
            error: Error that occurred
            **kwargs: Additional arguments
        """
        if self.start_time is not None:
            latency = time.monotonic() - self.start_time

            self.metrics.record_llm_request(
                model=self.model_name or "unknown",
                success=False,
                latency=latency
            )

            self.metrics.record_error(
                error_type=type(error).__name__,
                model=self.model_name
            )

        # Reset per-call state (model_name too, matching on_llm_end).
        self.start_time = None
        self.model_name = None

    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        **kwargs: Any
    ):
        """Called when chain starts.

        Args:
            serialized: Serialized chain
            inputs: Chain inputs
            **kwargs: Additional arguments
        """
        self.chain_start_time = time.monotonic()
        self.chain_type = serialized.get("name", "unknown")

    async def on_chain_end(
        self,
        outputs: Dict[str, Any],
        **kwargs: Any
    ):
        """Called when chain completes.

        Args:
            outputs: Chain outputs
            **kwargs: Additional arguments
        """
        if self.chain_start_time is not None:
            duration = time.monotonic() - self.chain_start_time

            self.metrics.record_chain_execution(
                chain_type=self.chain_type or "unknown",
                success=True,
                duration=duration
            )

        # Reset so an unmatched later chain event cannot reuse a stale start.
        self.chain_start_time = None
        self.chain_type = None

    async def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any
    ):
        """Called when chain encounters error.

        Args:
            error: Error that occurred
            **kwargs: Additional arguments
        """
        if self.chain_start_time is not None:
            duration = time.monotonic() - self.chain_start_time

            self.metrics.record_chain_execution(
                chain_type=self.chain_type or "unknown",
                success=False,
                duration=duration
            )

            self.metrics.record_error(
                error_type=type(error).__name__,
                model=None
            )

        # Reset so an unmatched later chain event cannot reuse a stale start.
        self.chain_start_time = None
        self.chain_type = None