"""Streaming callback implementation for real-time output."""

import asyncio
import json
import logging
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import BaseMessage, LLMResult

logger = logging.getLogger(__name__)


class StreamingCallback(AsyncCallbackHandler):
    """Callback handler that streams LLM output to connected clients.

    Forwards generation lifecycle events (start, per-token, completion,
    errors, chain boundaries) to an optional WebSocket connection and/or
    an asyncio queue used for Server-Sent Events, while tracking token
    usage, estimated cost, and timing metrics for a single generation.
    """

    # Approximate USD pricing per 1K tokens, keyed by model-name prefix.
    # Hoisted to a class constant so it is built once, not per call.
    _PRICING = {
        "gpt-4": {"prompt": 0.03, "completion": 0.06},
        "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
        "gpt-3.5-turbo": {"prompt": 0.0015, "completion": 0.002},
        "gpt-3.5-turbo-16k": {"prompt": 0.003, "completion": 0.004},
    }

    def __init__(
        self,
        websocket=None,
        sse_queue: Optional[asyncio.Queue] = None,
        track_tokens: bool = True,
        track_cost: bool = True
    ):
        """Initialize streaming callback.

        Args:
            websocket: Object exposing an async ``send_json`` method
                (e.g. a WebSocket connection) to stream events to, or None.
            sse_queue: Queue for Server-Sent Events, or None.
            track_tokens: Whether to count completion tokens as they stream.
            track_cost: Whether to estimate generation cost.
        """
        self.websocket = websocket
        self.sse_queue = sse_queue
        self.track_tokens = track_tokens
        self.track_cost = track_cost

        # Token and cost tracking. The streamed per-token count is a
        # provisional value; authoritative numbers come from the provider's
        # token_usage in on_llm_end when available.
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_cost = 0.0

        # Timing markers (timezone-aware UTC datetimes, or None when unset).
        self.start_time: Optional[datetime] = None
        self.first_token_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

        # Accumulated generated text for the current generation.
        self.generated_content = ""

    async def on_llm_start(
        self,
        serialized: Dict[str, Any],
        prompts: List[str],
        **kwargs: Any
    ):
        """Called when LLM starts generating.

        Args:
            serialized: Serialized LLM
            prompts: List of prompts
            **kwargs: Additional arguments
        """
        self.start_time = datetime.now(timezone.utc)
        # Clear per-generation state so a reused callback does not report
        # stale first-token/end timestamps from a previous generation.
        self.first_token_time = None
        self.end_time = None
        self.generated_content = ""

        await self._send_event({
            "type": "generation_start",
            "timestamp": self.start_time.isoformat(),
            "model": serialized.get("name", "unknown")
        })

        logger.debug(f"Generation started with {len(prompts)} prompts")

    async def on_llm_new_token(
        self,
        token: str,
        **kwargs: Any
    ):
        """Called when LLM generates a new token.

        Args:
            token: Generated token
            **kwargs: Additional arguments
        """
        if self.first_token_time is None:
            self.first_token_time = datetime.now(timezone.utc)
            # Guard: a token may arrive without a preceding on_llm_start
            # (or after reset()), in which case start_time is None.
            if self.start_time is not None:
                time_to_first_token = (
                    self.first_token_time - self.start_time
                ).total_seconds()
                logger.debug(f"Time to first token: {time_to_first_token:.2f}s")

        self.generated_content += token

        # Stream token to client
        await self._send_event({
            "type": "token",
            "content": token,
            "timestamp": datetime.now(timezone.utc).isoformat()
        })

        # Track tokens if enabled
        if self.track_tokens:
            self.completion_tokens += 1

    async def on_llm_end(
        self,
        response: LLMResult,
        **kwargs: Any
    ):
        """Called when LLM finishes generating.

        Args:
            response: LLM response
            **kwargs: Additional arguments
        """
        self.end_time = datetime.now(timezone.utc)
        # Guard against an end event with no recorded start (e.g. after
        # reset()); report 0.0 rather than raising TypeError on None.
        if self.start_time is not None:
            generation_time = (self.end_time - self.start_time).total_seconds()
        else:
            generation_time = 0.0

        # Extract authoritative token usage if the provider reported it;
        # this overrides the provisional per-token streaming count.
        if response.llm_output:
            token_usage = response.llm_output.get("token_usage", {})
            self.prompt_tokens = token_usage.get("prompt_tokens", 0)
            self.completion_tokens = token_usage.get("completion_tokens", 0)
            self.total_tokens = token_usage.get("total_tokens", 0)

            # Calculate cost if enabled
            if self.track_cost:
                self.total_cost = self._calculate_cost(
                    response.llm_output.get("model_name", ""),
                    self.prompt_tokens,
                    self.completion_tokens
                )

        await self._send_event({
            "type": "generation_complete",
            "timestamp": self.end_time.isoformat(),
            "generation_time": generation_time,
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "cost": self.total_cost if self.track_cost else None
        })

        logger.info(
            f"Generation complete: {self.total_tokens} tokens in {generation_time:.2f}s"
        )

    async def on_llm_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any
    ):
        """Called when LLM encounters an error.

        Args:
            error: The error that occurred
            **kwargs: Additional arguments
        """
        await self._send_event({
            "type": "error",
            "error": str(error),
            "timestamp": datetime.now(timezone.utc).isoformat()
        })

        logger.error(f"LLM error: {error}")

    async def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        **kwargs: Any
    ):
        """Called when chain starts.

        Args:
            serialized: Serialized chain
            inputs: Chain inputs
            **kwargs: Additional arguments
        """
        await self._send_event({
            "type": "chain_start",
            "chain": serialized.get("name", "unknown"),
            "timestamp": datetime.now(timezone.utc).isoformat()
        })

    async def on_chain_end(
        self,
        outputs: Dict[str, Any],
        **kwargs: Any
    ):
        """Called when chain ends.

        Args:
            outputs: Chain outputs
            **kwargs: Additional arguments
        """
        await self._send_event({
            "type": "chain_end",
            "timestamp": datetime.now(timezone.utc).isoformat()
        })

    async def _send_event(self, event: Dict[str, Any]):
        """Send event to connected clients.

        Delivery is best-effort: transport errors are logged, never raised,
        so a dropped client cannot abort the generation.

        Args:
            event: Event data to send
        """
        try:
            # Send via WebSocket if available
            if self.websocket:
                await self.websocket.send_json(event)

            # Send via SSE queue if available
            if self.sse_queue:
                await self.sse_queue.put(json.dumps(event))

        except Exception as e:
            logger.error(f"Error sending event: {e}")

    def _calculate_cost(
        self,
        model_name: str,
        prompt_tokens: int,
        completion_tokens: int
    ) -> float:
        """Calculate generation cost based on model and tokens.

        Uses longest-prefix matching against the pricing table so that
        versioned model names (e.g. "gpt-4-0613") are priced like their
        base model instead of silently costing 0.0. The longest prefix
        wins so "gpt-4-32k..." is not priced as "gpt-4".

        Args:
            model_name: Name of the model
            prompt_tokens: Number of prompt tokens
            completion_tokens: Number of completion tokens

        Returns:
            Estimated cost in USD, or 0.0 for unknown models.
        """
        matches = [prefix for prefix in self._PRICING if model_name.startswith(prefix)]
        if not matches:
            return 0.0

        rates = self._PRICING[max(matches, key=len)]
        prompt_cost = (prompt_tokens / 1000) * rates["prompt"]
        completion_cost = (completion_tokens / 1000) * rates["completion"]
        return prompt_cost + completion_cost

    def get_metrics(self) -> Dict[str, Any]:
        """Get callback metrics.

        Returns:
            Dictionary of metrics; timing entries are included only when
            the corresponding timestamps were recorded.
        """
        metrics = {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "generated_content_length": len(self.generated_content),
        }

        if self.track_cost:
            metrics["total_cost"] = self.total_cost

        if self.start_time and self.end_time:
            metrics["generation_time"] = (
                self.end_time - self.start_time
            ).total_seconds()

        if self.first_token_time and self.start_time:
            metrics["time_to_first_token"] = (
                self.first_token_time - self.start_time
            ).total_seconds()

        return metrics

    def reset(self):
        """Reset all counters, timestamps, and buffered content."""
        self.total_tokens = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_cost = 0.0
        self.start_time = None
        self.first_token_time = None
        self.end_time = None
        self.generated_content = ""

class ProgressCallback(AsyncCallbackHandler):
    """Callback that reports step-based progress to registered handlers.

    Each chain start increments the step counter and fans a progress
    snapshot out to every registered handler (sync or async).
    """

    def __init__(self, total_steps: Optional[int] = None):
        """Initialize progress callback.

        Args:
            total_steps: Expected number of steps, or None if unknown
                (percentage is then reported as None).
        """
        self.total_steps = total_steps
        self.current_step = 0
        self.progress_handlers = []

    def add_progress_handler(self, handler):
        """Register a handler to receive progress updates.

        Args:
            handler: Callable (sync or async) invoked with a progress dict.
        """
        self.progress_handlers.append(handler)

    async def on_chain_start(self, *args, **kwargs):
        """Advance the step counter and notify handlers."""
        self.current_step += 1
        await self._update_progress()

    async def _update_progress(self):
        """Build a progress snapshot and dispatch it to every handler."""
        if self.total_steps:
            pct = self.current_step / self.total_steps * 100
        else:
            # Unknown total: no meaningful percentage to report.
            pct = None

        snapshot = {
            "current": self.current_step,
            "total": self.total_steps,
            "percentage": pct,
        }

        # One failing handler must not prevent the rest from running.
        for callback in self.progress_handlers:
            try:
                if asyncio.iscoroutinefunction(callback):
                    await callback(snapshot)
                else:
                    callback(snapshot)
            except Exception as e:
                logger.error(f"Error in progress handler: {e}")


def create_streaming_callback(
    websocket_manager=None,
    task_id: Optional[str] = None,
    sse_queue: Optional[asyncio.Queue] = None
) -> StreamingCallback:
    """Create a StreamingCallback wired to a WebSocket manager and/or SSE queue.

    Args:
        websocket_manager: Manager exposing ``send_to_task(task_id, data)``
            for broadcasting to a task's clients, or None.
        task_id: Identifier of the task whose clients receive events.
        sse_queue: Optional queue for Server-Sent Events.

    Returns:
        A configured StreamingCallback instance.
    """

    class _ManagerProxy:
        """Adapts the manager's task-broadcast API to the ``send_json`` interface."""

        def __init__(self, manager, task_id):
            self.manager = manager
            self.task_id = task_id

        async def send_json(self, data):
            """Broadcast JSON-serializable data to all clients of the task."""
            if self.manager and self.task_id:
                await self.manager.send_to_task(self.task_id, data)

    # Only build a proxy when both pieces needed for delivery are present.
    proxy = (
        _ManagerProxy(websocket_manager, task_id)
        if websocket_manager and task_id
        else None
    )

    return StreamingCallback(
        websocket=proxy,
        sse_queue=sse_queue,
        track_tokens=True,
        track_cost=True
    )