"""
Langfuse监控管理器 (2.x版本)
提供统一的LLM应用监控接口，支持追踪、评估和提示管理
基于Langfuse 2.x低级SDK实现
"""

import os
import logging
from typing import Dict, Any, Optional, List
from functools import wraps

# Quiet Langfuse's internal loggers — its task manager is chatty at DEBUG.
for _noisy_logger in (
    "langfuse",
    "langfuse._task_manager.media_manager",
    "langfuse._task_manager",
    "langfuse._task_manager.task_manager",
):
    logging.getLogger(_noisy_logger).setLevel(logging.WARNING)

# 检查Langfuse是否可用
# Check whether the optional Langfuse SDK is installed.
try:
    from langfuse import Langfuse
    LANGFUSE_AVAILABLE = True
except ImportError:
    LANGFUSE_AVAILABLE = False
    # Bug fix: the name `Langfuse` is referenced in type annotations below
    # (evaluated at definition time), so it must exist even when the SDK is
    # missing — otherwise importing this module raises NameError despite
    # this try/except guard.
    Langfuse = None
    logging.warning("Langfuse not available. Install with: pip install langfuse")

# 导入LangChain回调基类
# Import the LangChain callback base class.
try:
    from langchain_core.callbacks import BaseCallbackHandler
    LANGCHAIN_CALLBACKS_AVAILABLE = True
except ImportError:
    LANGCHAIN_CALLBACKS_AVAILABLE = False
    logging.warning("LangChain callbacks not available")

    # Bug fix: LangfuseHandler subclasses BaseCallbackHandler, so the name
    # must exist even when langchain_core is missing — otherwise importing
    # this module raises NameError despite this try/except guard.
    class BaseCallbackHandler:
        """Minimal stand-in for langchain_core's BaseCallbackHandler."""
        pass

from config import Config

logger = logging.getLogger(__name__)

class LangfuseHandler(BaseCallbackHandler):
    """Langfuse handler for LangChain integration (Langfuse 2.x low-level SDK).

    Bridges LangChain's callback interface to Langfuse: one trace is created
    per LLM run, and the run is recorded as a generation (prompts, response
    and token usage) inside that trace.
    """

    def __init__(self, langfuse_client: Langfuse, trace_name: str = "langchain_trace",
                 user_id: str = None, session_id: str = None):
        """Store the Langfuse client and identifiers used for new traces.

        Args:
            langfuse_client: Initialized Langfuse 2.x client.
            trace_name: Name given to traces created by this handler.
            user_id: Optional user identifier attached to new traces.
            session_id: Optional session identifier attached to new traces.
        """
        super().__init__()
        self.client = langfuse_client
        self.trace_name = trace_name
        self.user_id = user_id
        self.session_id = session_id
        self.trace = None               # lazily created by start_trace()
        self.current_span = None        # span opened by start_span()
        self.current_generation = None  # generation awaiting output in on_llm_end()

        # Attributes expected by the LangChain callback interface.
        self.name = "langfuse_handler"
        self.always_verbose = True

    def setHandler(self, trace_name: str = None, user_id: str = None, session_id: str = None):
        """Update handler parameters; resets the trace when identifiers change."""
        if trace_name:
            self.trace_name = trace_name
        if user_id:
            self.user_id = user_id
            logger.info(f"设置user_id: {user_id}")
        if session_id:
            self.session_id = session_id
            logger.info(f"设置session_id: {session_id}")

        # If user_id or session_id was updated, always reset the trace so the
        # next start_trace() call picks up the new parameters.
        if user_id or session_id:
            logger.info("重置trace以应用新的user_id/session_id")
            self.trace = None

    def start_trace(self):
        """Create the trace if one does not exist yet; return the trace."""
        if not self.trace:
            # Only pass user_id / session_id when they are non-empty.
            trace_kwargs = {"name": self.trace_name}
            if self.user_id:
                trace_kwargs["user_id"] = self.user_id
            if self.session_id:
                trace_kwargs["session_id"] = self.session_id

            # Debug logging.
            logger.info(f"🔍 LangfuseHandler.start_trace 开始")
            logger.info(f"📋 trace参数: {trace_kwargs}")
            logger.info(f"👤 当前user_id: {self.user_id}")
            logger.info(f"🔗 当前session_id: {self.session_id}")

            self.trace = self.client.trace(**trace_kwargs)

            # Verify the trace was created.
            if self.trace:
                logger.info(f"✅ Langfuse trace创建成功: {self.trace_name}")
                logger.info(f"🆔 Trace ID: {getattr(self.trace, 'id', 'unknown')}")
            else:
                logger.error("❌ Langfuse trace创建失败")
        else:
            logger.info(f"ℹ️ Trace已存在，跳过创建")
        return self.trace

    def end_trace(self, output: Any = None):
        """Attach the final output to the trace (no-op when output is falsy)."""
        if self.trace and output:
            self.trace.update(output=output)

    def start_span(self, name: str, input_data: Any = None):
        """Open a span under the current trace and remember it."""
        if self.trace:
            self.current_span = self.trace.span(name=name, input=input_data)
        return self.current_span

    def end_span(self, output: Any = None):
        """Close the currently open span, if any."""
        if self.current_span:
            self.current_span.end(output=output)
            self.current_span = None

    def create_generation(self, name: str, input_data: Any = None,
                         output_data: Any = None, metadata: Dict[str, Any] = None,
                         usage: Dict[str, Any] = None):
        """Create a generation record on the current trace.

        The ``usage`` dict is forwarded only when it carries at least one
        positive token count, matching the Langfuse 2.x usage schema.
        Returns the generation object, or None when no trace exists.
        """
        if self.trace:
            kwargs = {
                "name": name,
                "input": input_data,
                "output": output_data,
                "metadata": metadata or {}
            }

            # Only attach usage when it contains valid (positive) counts.
            if usage and isinstance(usage, dict) and len(usage) > 0:
                valid_usage = {}
                if "promptTokens" in usage and usage["promptTokens"] > 0:
                    valid_usage["promptTokens"] = usage["promptTokens"]
                if "completionTokens" in usage and usage["completionTokens"] > 0:
                    valid_usage["completionTokens"] = usage["completionTokens"]
                if "totalTokens" in usage and usage["totalTokens"] > 0:
                    valid_usage["totalTokens"] = usage["totalTokens"]

                if valid_usage:
                    kwargs["usage"] = valid_usage

            return self.trace.generation(**kwargs)
        return None

    def create_event(self, name: str, input_data: Any = None,
                    metadata: Dict[str, Any] = None):
        """Create an event record on the current trace (None when no trace)."""
        if self.trace:
            return self.trace.event(
                name=name,
                input=input_data,
                metadata=metadata or {}
            )
        return None

    # LangChain callback methods
    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs):
        """LangChain callback: start a trace and open a generation record."""
        try:
            logger.info(f"🎯 LangfuseHandler.on_llm_start 开始")
            logger.info(f"👤 当前追踪参数 - user_id: {self.user_id}, session_id: {self.session_id}")
            logger.info(f"🤖 模型信息: {kwargs.get('model_name', 'unknown')}")
            logger.info(f"📝 提示词数量: {len(prompts)}")
            logger.info(f"📝 第一个提示词预览: {prompts[0][:200] if prompts else '无'}...")

            # Start (or reuse) the trace.
            logger.info(f"🔍 开始创建trace")
            self.start_trace()

            # Create the generation and keep a reference so on_llm_end can
            # attach the output to it.
            logger.info(f"📊 创建generation记录")
            self.current_generation = self.create_generation(
                name=f"llm_generation_{kwargs.get('model_name', 'unknown')}",
                input_data={"prompts": prompts},
                metadata=kwargs
            )
            logger.info(f"✅ generation记录创建完成")
        except Exception as e:
            logger.error(f"❌ Langfuse on_llm_start error: {e}")

    def on_llm_end(self, response, **kwargs):
        """LangChain callback: record the response and close the trace."""
        try:
            logger.info(f"🎯 LangfuseHandler.on_llm_end 开始")

            # Extract token usage, when the response object carries it.
            # NOTE(review): assumes usage lives under response_metadata["usage"]
            # — verify against the LLM wrapper actually in use.
            usage = {}
            if hasattr(response, 'response_metadata') and response.response_metadata:
                usage = response.response_metadata.get('usage', {})
                logger.info(f"📊 Token使用情况: {usage}")

            # Extract the response text.
            response_content = str(response.content) if hasattr(response, 'content') else str(response)
            logger.info(f"📄 响应内容长度: {len(response_content)}")
            logger.info(f"📄 响应内容预览: {response_content[:300]}...")

            # Update the generation opened in on_llm_start.
            # (Cleanup: dropped the redundant hasattr check — the attribute is
            # always initialized in __init__.)
            if self.current_generation:
                logger.info(f"📊 更新generation记录")
                self.current_generation.update(
                    output=response_content,
                    usage=usage if usage else None
                )
                self.current_generation = None
                logger.info(f"✅ generation记录更新完成")
            else:
                logger.warning(f"⚠️ 没有找到current_generation记录")

            # Close the trace with the final output.
            if self.trace:
                logger.info(f"🔚 结束trace")
                self.end_trace(output={
                    "response": response_content,
                    "usage": usage
                })
                logger.info(f"✅ trace结束完成")
            else:
                logger.warning(f"⚠️ 没有找到trace记录")
        except Exception as e:
            logger.error(f"❌ Langfuse on_llm_end error: {e}")

    def on_llm_error(self, error: Exception, **kwargs):
        """LangChain callback: record the error on the trace."""
        try:
            if self.trace:
                self.end_trace(output={"error": str(error)})
        except Exception as e:
            logger.error(f"Langfuse on_llm_error error: {e}")

    def calculate_usage_and_cost(self, model: str, prompt_tokens: int = 0,
                                completion_tokens: int = 0) -> Dict[str, Any]:
        """Compute token usage and estimated cost in Langfuse 2.x format.

        Args:
            model: Model name, e.g. "gpt-4" or "gpt-3.5-turbo-0613".
            prompt_tokens: Tokens consumed by the prompt.
            completion_tokens: Tokens produced by the completion.

        Returns:
            Dict with promptTokens / completionTokens / totalTokens / totalCost.
        """
        total_tokens = prompt_tokens + completion_tokens

        # Per-model cost per 1K tokens (example prices; real prices may differ).
        cost_per_1k_tokens = {
            "gpt-4": 0.03,  # input-side price
            "gpt-4-turbo": 0.01,
            "gpt-3.5-turbo": 0.001,
            "doubao-1-5-pro-32k-250115": 0.001,  # assumed price
        }

        # Bug fix: the previous lookup used model.split("-")[0] (e.g. "gpt"),
        # which matches none of the table keys, so every model silently fell
        # through to the default rate.  Use an exact match first, then the
        # longest known prefix, then the default.
        cost_rate = cost_per_1k_tokens.get(model)
        if cost_rate is None:
            for known in sorted(cost_per_1k_tokens, key=len, reverse=True):
                if model.startswith(known):
                    cost_rate = cost_per_1k_tokens[known]
                    break
            else:
                cost_rate = 0.001  # default price

        total_cost = (total_tokens / 1000) * cost_rate

        # Structure required by Langfuse 2.x.
        return {
            "promptTokens": prompt_tokens,
            "completionTokens": completion_tokens,
            "totalTokens": total_tokens,
            "totalCost": total_cost
        }

class LangfuseManager:
    """Langfuse monitoring manager (2.x).

    Owns a single Langfuse client (configured from ``Config``) and exposes
    helpers for tracing decorated functions, RAG pipelines, workflows and
    raw LLM calls, plus scoring and flushing.
    """

    def __init__(self):
        self._client = None  # set by _setup_langfuse() when monitoring is enabled
        self._setup_langfuse()

    def _setup_langfuse(self):
        """Initialize the Langfuse client from Config, if available/enabled."""
        if not LANGFUSE_AVAILABLE:
            logger.warning("Langfuse not available")
            return

        if not Config.LANGFUSE_ENABLED:
            logger.info("Langfuse monitoring disabled")
            return

        try:
            # Disable automatic integrations before constructing the client.
            os.environ["LANGFUSE_OPENAI_INTEGRATION"] = "false"
            os.environ["LANGFUSE_AUTO_INSTRUMENT"] = "false"

            # Manual-mode client.
            self._client = Langfuse(
                public_key=Config.LANGFUSE_PUBLIC_KEY,
                secret_key=Config.LANGFUSE_SECRET_KEY,
                host=Config.LANGFUSE_HOST,
                debug=False  # keep SDK debug logging off
            )
            logger.info("Langfuse monitoring initialized successfully (manual mode)")

        except Exception as e:
            logger.error(f"Failed to initialize Langfuse: {e}")
            self._client = None

    @property
    def client(self) -> Optional[Langfuse]:
        """The underlying Langfuse client, or None when disabled."""
        return self._client

    def is_enabled(self) -> bool:
        """Return True when the SDK is installed, enabled, and initialized."""
        return (LANGFUSE_AVAILABLE and
                Config.LANGFUSE_ENABLED and
                self._client is not None)

    def create_handler(self, trace_name: str = "langchain_trace",
                      user_id: str = None, session_id: str = None) -> Optional[LangfuseHandler]:
        """Create a LangfuseHandler bound to this manager's client, or None."""
        if not self.is_enabled():
            return None

        return LangfuseHandler(
            langfuse_client=self._client,
            trace_name=trace_name,
            user_id=user_id,
            session_id=session_id
        )

    def trace_function(self, name: str = None, metadata: Dict[str, Any] = None):
        """Function-tracing decorator using Langfuse 2.x syntax.

        ``user_id`` / ``session_id`` are read from the call's kwargs and
        passed through to the wrapped function unchanged.

        Bug fix: previously the wrapped function ran inside the same ``try``
        as the tracing calls, so any exception raised by the function itself
        was caught and the function was called a *second* time (duplicating
        side effects).  Tracing errors are now isolated; the function runs
        exactly once and its own exceptions propagate unchanged.
        """
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                if not self.is_enabled():
                    return func(*args, **kwargs)

                trace_name = name or f"{func.__module__}.{func.__name__}"

                # Extract user_id and session_id from kwargs.
                user_id = kwargs.pop("user_id", None)
                session_id = kwargs.pop("session_id", None)

                trace = None
                try:
                    # Prepare trace parameters.
                    trace_kwargs = {"name": trace_name}
                    if user_id:
                        trace_kwargs["user_id"] = user_id
                    if session_id:
                        trace_kwargs["session_id"] = session_id

                    trace = self._client.trace(**trace_kwargs)
                    if metadata:
                        trace.update(metadata=metadata)
                except Exception as e:
                    logger.error(f"Failed to trace function {trace_name}: {e}")
                    trace = None

                # Re-add user_id and session_id so the decorated function can
                # still receive them.
                if user_id:
                    kwargs["user_id"] = user_id
                if session_id:
                    kwargs["session_id"] = session_id

                # Run the function exactly once, outside the tracing try-block.
                result = func(*args, **kwargs)

                if trace is not None:
                    try:
                        trace.update(output={"result": str(result)[:1000]})
                    except Exception as e:
                        logger.error(f"Failed to trace function {trace_name}: {e}")

                return result

            return wrapper
        return decorator

    def trace_rag_pipeline(self, query: str, retrieved_docs: List[Dict],
                           response: str, metadata: Dict[str, Any] = None,
                           user_id: str = None, session_id: str = None):
        """Trace a RAG pipeline run (query, retrieval stats, truncated response)."""
        if not self.is_enabled():
            return
        try:
            # Prepare trace parameters.
            trace_kwargs = {"name": "rag_pipeline"}
            if user_id:
                trace_kwargs["user_id"] = user_id
            if session_id:
                trace_kwargs["session_id"] = session_id

            trace = self._client.trace(**trace_kwargs)
            trace.update(
                input={
                    "query": query,
                    "retrieved_docs_count": len(retrieved_docs)
                },
                metadata=metadata or {}
            )
            # Truncate long responses to keep trace payloads small.
            trace.update(
                output={
                    "response": response[:500] + "..." if len(response) > 500 else response
                }
            )
        except Exception as e:
            logger.error(f"Failed to trace RAG pipeline: {e}")

    def trace_workflow(self, workflow_name: str, steps: List[Dict],
                      final_result: Any, metadata: Dict[str, Any] = None,
                      user_id: str = None, session_id: str = None):
        """Trace a workflow: one trace plus one child span per step.

        Each entry in ``steps`` may carry "name", "type", "input", "output".
        """
        if not self.is_enabled():
            return
        try:
            # Prepare trace parameters.
            trace_kwargs = {"name": f"workflow_{workflow_name}"}
            if user_id:
                trace_kwargs["user_id"] = user_id
            if session_id:
                trace_kwargs["session_id"] = session_id

            trace = self._client.trace(**trace_kwargs)
            trace.update(
                input=metadata or {},
                metadata={
                    "workflow_name": workflow_name,
                    "total_steps": len(steps)
                }
            )

            # Create a child span per workflow step.
            for i, step in enumerate(steps):
                step_span = trace.span(
                    name=step.get("name", f"step_{i}"),
                    input=step.get("input", {}),
                    metadata={
                        "step_index": i,
                        "step_type": step.get("type", "unknown")
                    }
                )
                step_span.end(output=step.get("output", {}))

            trace.update(
                output={
                    "final_result": str(final_result)[:1000] if final_result else None,
                    "total_steps": len(steps),
                    "steps_summary": [step.get("name", f"step_{i}") for i, step in enumerate(steps)]
                }
            )
        except Exception as e:
            logger.error(f"Failed to trace workflow: {e}")

    def trace_llm_call(self, model: str, prompt: str, response: str,
                       metadata: Dict[str, Any] = None, usage: Dict[str, Any] = None,
                       user_id: str = None, session_id: str = None):
        """Trace a single LLM call with truncated prompt/response and usage."""
        if not self.is_enabled():
            return
        try:
            # Prepare trace parameters.
            trace_kwargs = {"name": f"llm_call_{model}"}
            if user_id:
                trace_kwargs["user_id"] = user_id
            if session_id:
                trace_kwargs["session_id"] = session_id

            trace = self._client.trace(**trace_kwargs)
            trace.update(
                input={
                    "model": model,
                    "prompt": prompt[:500] + "..." if len(prompt) > 500 else prompt
                },
                metadata=metadata or {}
            )
            trace.update(
                output={
                    "response": response[:500] + "..." if len(response) > 500 else response,
                    "usage": usage or {}
                }
            )
        except Exception as e:
            logger.error(f"Failed to trace LLM call: {e}")

    def score_trace(self, trace_id: str, name: str, value: float,
                   comment: str = None):
        """Attach a score to an existing trace by id."""
        if not self.is_enabled():
            return

        try:
            self._client.score(
                trace_id=trace_id,
                name=name,
                value=value,
                comment=comment
            )
        except Exception as e:
            logger.error(f"Failed to create score: {e}")

    def flush(self):
        """Flush buffered data to the Langfuse backend."""
        if self.is_enabled():
            try:
                self._client.flush()
                logger.info("Langfuse data flushed successfully")
            except Exception as e:
                logger.error(f"Failed to flush Langfuse data: {e}")

# Module-level singleton instance shared by the convenience functions.
langfuse_manager = LangfuseManager()

# Convenience wrappers that delegate to the module-level singleton.
def trace_function(name: str = None, metadata: Dict[str, Any] = None):
    """Function-tracing decorator, delegating to the global manager."""
    return langfuse_manager.trace_function(name=name, metadata=metadata)

def trace_rag_pipeline(query: str, retrieved_docs: List[Dict],
                       response: str, metadata: Dict[str, Any] = None,
                       user_id: str = None, session_id: str = None):
    """Trace a RAG pipeline run via the global manager."""
    return langfuse_manager.trace_rag_pipeline(
        query=query,
        retrieved_docs=retrieved_docs,
        response=response,
        metadata=metadata,
        user_id=user_id,
        session_id=session_id,
    )

def trace_workflow(workflow_name: str, steps: List[Dict],
                  final_result: Any, metadata: Dict[str, Any] = None,
                  user_id: str = None, session_id: str = None):
    """Trace a workflow run via the global manager."""
    return langfuse_manager.trace_workflow(
        workflow_name=workflow_name,
        steps=steps,
        final_result=final_result,
        metadata=metadata,
        user_id=user_id,
        session_id=session_id,
    )

def trace_llm_call(model: str, prompt: str, response: str,
                   metadata: Dict[str, Any] = None, usage: Dict[str, Any] = None,
                   user_id: str = None, session_id: str = None):
    """Trace a single LLM call via the global manager."""
    return langfuse_manager.trace_llm_call(
        model=model,
        prompt=prompt,
        response=response,
        metadata=metadata,
        usage=usage,
        user_id=user_id,
        session_id=session_id,
    )

def score_trace(trace_id: str, name: str, value: float, comment: str = None):
    """Attach a score to a trace via the global manager."""
    return langfuse_manager.score_trace(
        trace_id=trace_id, name=name, value=value, comment=comment
    )

def flush_langfuse():
    """Flush buffered Langfuse data via the global manager."""
    return langfuse_manager.flush()