"""
模型工厂类 - 集成Langfuse 2.x Handler
基于Langfuse 2.x低级SDK实现
"""

import os
import logging
from typing import Optional
from langchain_openai import ChatOpenAI
from config import Config
from monitoring.langfuse_manager import langfuse_manager

logger = logging.getLogger(__name__)

class ChatModelFactory:
    """Chat model factory integrated with the Langfuse 2.x low-level SDK.

    Wraps a LangChain ``ChatOpenAI`` instance and, when Langfuse is enabled,
    manually traces each ``invoke`` call (one trace plus one generation
    record per call). ``get_model`` returns the *factory*, not the raw model;
    callers go through ``factory.invoke(...)``.
    """

    # Sampling parameters shared by every model instance this factory builds.
    model_params = {
        "temperature": 0.7,
        "seed": 42
    }

    def __init__(self, user_id: str = None, session_id: str = None):
        """Store trace identity; model and handler are attached later.

        Args:
            user_id: Optional user identifier propagated to Langfuse traces.
            session_id: Optional session identifier for Langfuse traces.
        """
        self.user_id = user_id
        self.session_id = session_id
        self.handler = None  # Langfuse handler, set by initHandler()
        self.model = None    # ChatOpenAI instance, set by get_model()

    def initHandler(self, trace_name: str = "Conductor"):
        """Create a Langfuse handler for this factory when Langfuse is enabled."""
        if not langfuse_manager.is_enabled():
            logger.info("Langfuse is disabled, skipping Handler initialization")
            return

        self.handler = langfuse_manager.create_handler(
            trace_name=trace_name,
            user_id=self.user_id,
            session_id=self.session_id
        )
        if self.handler:
            logger.info(f"Langfuse Handler initialized: {trace_name}")
        else:
            logger.warning("Failed to create Langfuse Handler")

    def setHandler(self, trace_name: str = None, user_id: str = None, session_id: str = None):
        """Update trace parameters on the factory and, if present, its handler.

        Bug fix: the original dereferenced ``self.handler`` unconditionally
        for ``trace_name``, raising ``AttributeError`` whenever no handler had
        been created; now guarded like the other two branches.
        """
        if trace_name and self.handler:
            self.handler.trace_name = trace_name
        if user_id:
            self.user_id = user_id
            if self.handler:
                self.handler.user_id = user_id
        if session_id:
            self.session_id = session_id
            if self.handler:
                self.handler.session_id = session_id

    @classmethod
    def get_model(cls, model_name: str = None, model_version: str = None,
                  user_id: str = None, session_id: str = None):
        """Build a ``ChatOpenAI`` model and wrap it in a factory instance.

        Args:
            model_name: Model identifier; defaults to ``Config.LLM_MODEL_NAME``.
            model_version: When set (GPT models only), forwarded to the client
                as ``api_version`` (Azure-style deployments).
            user_id: Optional Langfuse user identifier.
            session_id: Optional Langfuse session identifier.

        Returns:
            A ``ChatModelFactory`` holding the model and (if enabled) a
            Langfuse handler.
        """
        model_name = model_name or Config.LLM_MODEL_NAME

        # Single construction path: assemble kwargs once instead of
        # duplicating the ChatOpenAI call in three branches.
        model_kwargs = dict(
            model=model_name,
            openai_api_key=Config.OPENAI_API_KEY,
            openai_api_base=Config.OPENAI_BASE_URL,
            **cls.model_params
        )
        if "gpt" in model_name.lower():
            if model_version:
                logger.info('驱动 微软GPT 模型')
                model_kwargs["api_version"] = model_version
            else:
                logger.info('驱动 GPT 模型')
        else:
            logger.info('驱动 通用模型')
        model = ChatOpenAI(**model_kwargs)

        factory = cls(user_id=user_id, session_id=session_id)
        factory.model = model
        factory.initHandler()

        # In the 2.x SDK we trace manually inside invoke(); no LangChain
        # callbacks need to be registered on the model itself.
        if factory.handler:
            logger.info("Model created with Langfuse Handler")
        else:
            logger.info("Model created without Langfuse Handler")

        return factory

    @classmethod
    def get_default_model(cls, user_id: str = None, session_id: str = None):
        """Return a factory for the model configured in ``Config``."""
        return cls.get_model(
            model_name=Config.LLM_MODEL_NAME,
            user_id=user_id,
            session_id=session_id
        )

    def _extract_token_usage(self, result, messages):
        """Best-effort extraction of ``(prompt_tokens, completion_tokens)``.

        Tries, in order: LangChain ``response_metadata``, a ``usage``
        attribute, ``llm_output`` token usage, the raw provider response,
        and finally a rough ~4-chars-per-token estimate so usage is never
        reported as zero.
        """
        prompt_tokens = 0
        completion_tokens = 0

        if getattr(result, 'response_metadata', None):
            usage = result.response_metadata.get('usage', {})
            prompt_tokens = usage.get('prompt_tokens', 0)
            completion_tokens = usage.get('completion_tokens', 0)
        elif hasattr(result, 'usage'):
            prompt_tokens = getattr(result.usage, 'prompt_tokens', 0)
            completion_tokens = getattr(result.usage, 'completion_tokens', 0)
        elif getattr(result, 'llm_output', None):
            usage = result.llm_output.get('token_usage', {})
            prompt_tokens = usage.get('prompt_tokens', 0)
            completion_tokens = usage.get('completion_tokens', 0)

        # Fall back to the raw provider response when LangChain did not
        # surface usage. Narrowed from a bare ``except:`` which also
        # swallowed KeyboardInterrupt/SystemExit.
        if prompt_tokens == 0 and completion_tokens == 0:
            try:
                prompt_tokens = result._response.usage.prompt_tokens
                completion_tokens = result._response.usage.completion_tokens
            except AttributeError:
                pass

        # Last resort: estimate (~1 token per 4 characters).
        if prompt_tokens == 0 and completion_tokens == 0:
            total_chars = sum(len(str(msg)) for msg in messages)
            prompt_tokens = max(1, total_chars // 4)
            response_chars = len(str(result.content)) if hasattr(result, 'content') else 0
            completion_tokens = max(1, response_chars // 4)

        return prompt_tokens, completion_tokens

    def invoke(self, messages, **kwargs):
        """Invoke the underlying model, manually tracing the call in Langfuse.

        ``user_id``/``session_id`` kwargs are consumed here (tracing only)
        and never forwarded to the LLM. Errors from the model call are
        recorded as an error generation and re-raised.
        """
        import time

        # Pop Langfuse-only parameters so they are not passed to the LLM.
        user_id = kwargs.pop("user_id", None)
        session_id = kwargs.pop("session_id", None)

        if not langfuse_manager.is_enabled():
            # Tracing disabled: plain pass-through call.
            return self.model.invoke(messages, **kwargs)

        handler = langfuse_manager.create_handler(
            trace_name="model_invoke",
            user_id=user_id,
            session_id=session_id
        )
        if not handler:
            # Handler creation failed: fall back to an untraced call.
            return self.model.invoke(messages, **kwargs)

        handler.start_trace()
        start_time = time.time()
        model_name = getattr(self.model, 'model_name', "unknown")

        try:
            result = self.model.invoke(messages, **kwargs)
            latency = time.time() - start_time

            prompt_tokens, completion_tokens = self._extract_token_usage(result, messages)
            usage_data = handler.calculate_usage_and_cost(
                model_name, prompt_tokens, completion_tokens
            )

            handler.create_generation(
                name="model_generation",
                input_data={
                    "model": model_name,
                    "messages": [str(msg) for msg in messages],
                    "temperature": self.model_params.get("temperature"),
                    **kwargs
                },
                output_data={
                    "response": str(result.content) if hasattr(result, 'content') else str(result),
                    "latency": latency
                },
                metadata={
                    "model": model_name,
                    "temperature": self.model_params.get("temperature"),
                    "latency_seconds": latency
                },
                usage=usage_data
            )
            handler.end_trace(output={
                "status": "success",
                "latency": latency,
                "usage": usage_data
            })
            return result
        except Exception as e:
            latency = time.time() - start_time

            # Record the failure as its own generation, then re-raise.
            handler.create_generation(
                name="model_generation_error",
                input_data={
                    "model": model_name,
                    "messages": [str(msg) for msg in messages],
                    "temperature": self.model_params.get("temperature"),
                    **kwargs
                },
                output_data={"error": str(e)},
                metadata={
                    "model": model_name,
                    "error": True,
                    "latency_seconds": latency
                }
            )
            handler.end_trace(output={
                "status": "error",
                "error": str(e),
                "latency": latency
            })
            raise

    def flush(self):
        """Flush buffered Langfuse events, if tracing is enabled."""
        if langfuse_manager.is_enabled():
            langfuse_manager.flush()

class EmbeddingModelFactory:
    """Factory for embedding model clients (OpenAI, Azure OpenAI, or Ark)."""

    @classmethod
    def get_model(cls, model_name: str = None, use_azure: bool = False):
        """Return an embeddings client for ``model_name``.

        Names starting with ``text-embedding`` map to OpenAI (or Azure
        OpenAI when ``use_azure`` is True); any other name is served by the
        custom Ark embeddings client. Defaults to
        ``Config.EMBEDDING_MODEL_NAME`` when no name is given.
        """
        resolved_name = model_name or Config.EMBEDDING_MODEL_NAME

        if not resolved_name.startswith("text-embedding"):
            # Custom embedding backend (e.g. Ark).
            from rag.embedding_manager import ArkEmbeddings
            return ArkEmbeddings(
                api_key=Config.OPENAI_API_KEY,
                model_name=resolved_name
            )

        if use_azure:
            from langchain_openai import AzureOpenAIEmbeddings
            return AzureOpenAIEmbeddings(
                azure_deployment=resolved_name,
                openai_api_version="2024-05-01-preview",
                openai_api_key=Config.OPENAI_API_KEY,
                openai_api_base=Config.OPENAI_BASE_URL
            )

        from langchain_openai import OpenAIEmbeddings
        return OpenAIEmbeddings(
            model=resolved_name,
            openai_api_key=Config.OPENAI_API_KEY,
            openai_api_base=Config.OPENAI_BASE_URL
        )

    @classmethod
    def get_default_model(cls):
        """Return the embedding model configured via ``Config``."""
        return cls.get_model(Config.EMBEDDING_MODEL_NAME)