"""Content generation chain implementation."""

from typing import AsyncGenerator, Dict, Any, Optional, List
import logging
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_openai import ChatOpenAI
from langchain_community.llms import QianfanLLMEndpoint
from motor.motor_asyncio import AsyncIOMotorClient
from bson import ObjectId

from ..core.config import settings
from ..callbacks.streaming import StreamingCallback

logger = logging.getLogger(__name__)


class ContentGenerationChain:
    """Chain for content generation with multiple model support.

    Language models are initialized eagerly from whichever API keys are
    configured in ``settings``; prompt templates are loaded from MongoDB
    when a client is supplied, and degrade gracefully when it is not.
    """

    def __init__(self, mongodb_client: Optional["AsyncIOMotorClient"] = None):
        """Initialize the content generation chain.

        Args:
            mongodb_client: Optional MongoDB client for template storage.
                When omitted, ``load_template`` returns ``None`` for every id.
        """
        self.mongodb_client = mongodb_client
        # Template storage is optional; keep a database handle only when a
        # client was actually provided.
        self.db = mongodb_client.get_database(settings.mongodb_database) if mongodb_client else None

        # Mapping of model name -> LLM instance, built once at construction.
        self.models = self._initialize_models()

    def _initialize_models(self) -> Dict[str, Any]:
        """Initialize available language models from configured API keys.

        Returns:
            Dictionary of model name to model instance. May be empty when no
            provider keys are configured; models can then be created
            dynamically based on user configuration.
        """
        models: Dict[str, Any] = {}

        # OpenAI models — both share the same key and settings.
        if settings.openai_api_key:
            for model_id in ("gpt-4", "gpt-3.5-turbo"):
                models[model_id] = ChatOpenAI(
                    model=model_id,
                    temperature=0.7,
                    streaming=True,
                    openai_api_key=settings.openai_api_key
                )

        # DeepSeek models (served through an OpenAI-compatible API).
        if settings.deepseek_api_key:
            for model_id in ("deepseek-chat", "deepseek-coder"):
                models[model_id] = ChatOpenAI(
                    model=model_id,
                    temperature=0.7,
                    streaming=True,
                    openai_api_key=settings.deepseek_api_key,
                    openai_api_base=settings.deepseek_base_url + "/v1"
                )

        # Baidu Qianfan models — best-effort: a failure here must not take
        # down the other providers.
        if settings.baidu_api_key and settings.baidu_secret_key:
            try:
                models["qwen"] = QianfanLLMEndpoint(
                    model="qwen-max",
                    temperature=0.7,
                    qianfan_ak=settings.baidu_api_key,
                    qianfan_sk=settings.baidu_secret_key
                )
                models["ernie"] = QianfanLLMEndpoint(
                    model="ernie-bot-4",
                    temperature=0.7,
                    qianfan_ak=settings.baidu_api_key,
                    qianfan_sk=settings.baidu_secret_key
                )
            except Exception as e:
                logger.warning(f"Failed to initialize Qianfan models: {e}")

        # BUG FIX: the original emitted the "no built-in models" info message
        # BEFORE the Qianfan block ran, so it could log even though Qianfan
        # models were subsequently registered. Both notices now run only
        # after every provider has been attempted.
        if not models:
            logger.info("No built-in models configured. Models will be created dynamically based on user configuration.")
            logger.warning("No models initialized. Please configure API keys.")

        return models

    def _resolve_model(self, model_name: str) -> Any:
        """Resolve a model by name with graceful fallback.

        Fallback order: requested name -> ``settings.default_model`` ->
        first registered model. Shared by ``generate`` and ``stream`` so the
        two entry points behave identically (the original duplicated this
        logic, and ``stream`` silently skipped the warning log).

        Args:
            model_name: Requested model name.

        Returns:
            The resolved LLM instance.

        Raises:
            ValueError: If no model can be resolved at all.
        """
        llm = self.models.get(model_name)
        if llm is not None:
            return llm

        logger.warning(f"Model {model_name} not available, trying default")
        llm = self.models.get(settings.default_model)
        if llm is None and self.models:
            # Deterministic last resort: first registered model (dicts keep
            # insertion order), without building a throwaway list.
            llm = next(iter(self.models.values()))
        if llm is None:
            available_models = list(self.models.keys())
            raise ValueError(f"Model '{model_name}' not found and no alternative models available. Available models: {available_models}")
        return llm

    async def generate(
        self,
        template_id: str,
        variables: Dict[str, Any],
        model_name: str = "gpt-3.5-turbo",
        callbacks: Optional[List] = None
    ) -> AsyncGenerator[str, None]:
        """Generate content using specified template and model.

        Args:
            template_id: ID of the prompt template to use.
            variables: Variables to inject into the template.
            model_name: Name of the model to use (falls back to the default,
                then to any available model).
            callbacks: Optional list of callback handlers.

        Yields:
            Generated content chunks.

        Raises:
            ValueError: If the template or any usable model is missing.
        """
        # Load template; a missing template is a caller error, not a log line.
        template = await self.load_template(template_id)
        if not template:
            raise ValueError(f"Template {template_id} not found")

        # Build the chat prompt from the template's two role sections.
        prompt = ChatPromptTemplate.from_messages([
            ("system", template.get("system_prompt", "")),
            ("user", template.get("user_prompt", ""))
        ])

        llm = self._resolve_model(model_name)

        # NOTE(review): assigning callbacks mutates the shared model
        # instance; concurrent generations with different callbacks may
        # interfere — confirm whether per-call binding is needed.
        if callbacks:
            llm.callbacks = callbacks

        chain = LLMChain(llm=llm, prompt=prompt)

        # Stream generation, normalizing chunk payloads to plain strings.
        try:
            async for chunk in chain.astream(variables):
                if isinstance(chunk, dict) and "text" in chunk:
                    yield chunk["text"]
                else:
                    yield str(chunk)
        except Exception as e:
            logger.error(f"Error during generation: {e}")
            raise

    async def stream(
        self,
        prompt: str,
        model_name: str = "gpt-3.5-turbo",
        callbacks: Optional[List] = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Stream generation with direct prompt.

        Args:
            prompt: Direct prompt text.
            model_name: Name of the model to use (falls back to the default,
                then to any available model).
            callbacks: Optional list of callback handlers.
            **kwargs: Additional model parameters (applied only when the
                model exposes a matching attribute).

        Yields:
            Generated content chunks.

        Raises:
            ValueError: If no usable model is available.
        """
        llm = self._resolve_model(model_name)

        # NOTE(review): both of the mutations below persist on the shared
        # model instance beyond this call (e.g. a temperature override
        # sticks for every later caller) — confirm this is intended.
        if callbacks:
            llm.callbacks = callbacks
        for key, value in kwargs.items():
            if hasattr(llm, key):
                setattr(llm, key, value)

        # Stream generation; chat models yield message chunks with a
        # .content attribute, plain LLMs yield strings.
        try:
            async for chunk in llm.astream(prompt):
                if hasattr(chunk, 'content'):
                    yield chunk.content
                else:
                    yield str(chunk)
        except Exception as e:
            logger.error(f"Error during streaming: {e}")
            raise

    async def load_template(self, template_id: str) -> Optional[Dict[str, Any]]:
        """Load prompt template from MongoDB.

        Args:
            template_id: ID of the template to load. Valid ObjectId strings
                are converted; anything else is looked up as a raw key.

        Returns:
            Template dictionary, or ``None`` if not found, on lookup error,
            or when MongoDB is not connected.
        """
        if not self.db:
            logger.warning("MongoDB not connected, cannot load template")
            return None

        try:
            template = await self.db.prompt_templates.find_one(
                {"_id": ObjectId(template_id) if ObjectId.is_valid(template_id) else template_id}
            )
            return template
        except Exception as e:
            # Deliberately swallow lookup failures: callers treat None as
            # "not found" and raise their own error.
            logger.error(f"Error loading template {template_id}: {e}")
            return None

    def get_available_models(self) -> List[str]:
        """Get list of available model names.

        Returns:
            List of model names, in registration order.
        """
        return list(self.models.keys())

    def get_model_info(self, model_name: str) -> Dict[str, Any]:
        """Get information about a specific model.

        Args:
            model_name: Name of the model.

        Returns:
            Model information dictionary, or an ``{"error": ...}`` dict when
            the model is unknown.
        """
        model = self.models.get(model_name)
        if not model:
            return {"error": f"Model {model_name} not found"}

        # Attributes vary per provider class, so read them defensively.
        return {
            "name": model_name,
            "type": type(model).__name__,
            "temperature": getattr(model, "temperature", None),
            "max_tokens": getattr(model, "max_tokens", None),
            "streaming": getattr(model, "streaming", False)
        }