import asyncio
import json
import os
import random
import uuid
from dataclasses import asdict, dataclass
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import langsmith
from langsmith import Client
from langsmith.schemas import Run, RunTypeEnum

from utils.exceptions import MonitoringError
from utils.logger import Logger

# BUG FIX: only ``Logger`` is imported from utils.logger — there is no
# ``get_logger`` in scope, so the original call raised NameError at import
# time. Instantiate the imported class instead.
# NOTE(review): assumes ``utils.logger.Logger`` accepts the logger name as
# its single constructor argument — confirm against utils/logger.py.
logger = Logger("langsmith_monitor")


@dataclass
class MonitoringConfig:
    """Monitoring configuration for the LangSmith client.

    Connection settings may be overridden via environment variables
    (``LANGSMITH_API_KEY``, ``LANGSMITH_PROJECT``, ``LANGSMITH_ENDPOINT``)
    so real credentials never have to live in source code; the original
    literal defaults are kept as fallbacks for backward compatibility.
    """
    # API key; prefer the LANGSMITH_API_KEY env var over the placeholder.
    api_key: str = os.getenv("LANGSMITH_API_KEY", "your-langsmith-api-key")
    # Project all runs/traces are recorded under.
    project_name: str = os.getenv("LANGSMITH_PROJECT", "maess-flow")
    # LangSmith API endpoint.
    endpoint: str = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
    # Master switch for run/trace recording.
    enable_tracing: bool = True
    # Master switch for evaluation features.
    enable_evaluations: bool = True
    # Fraction of traces to record, in [0.0, 1.0]; 1.0 records everything.
    sample_rate: float = 1.0
    # Retry budget for client calls (currently informational).
    max_retries: int = 3
    # Request timeout in seconds (currently informational).
    timeout: int = 30


class LangSmithMonitor:
    """LangSmith monitoring and trace instrumentation.

    Wraps ``langsmith.Client`` to record runs (traces), structured events,
    evaluation datasets, and aggregate project statistics for the project
    named in :class:`MonitoringConfig`. The ``log_*`` helpers swallow and
    log their own errors so that monitoring failures never break callers;
    the dataset/evaluation/export methods raise :class:`MonitoringError`.
    """

    def __init__(self, config: Optional[MonitoringConfig] = None):
        """Create the monitor and connect to LangSmith immediately.

        Args:
            config: Monitoring settings; a default :class:`MonitoringConfig`
                is used when omitted.

        Raises:
            MonitoringError: If the client cannot be created or the
                configured project is unreachable.
        """
        self.config = config or MonitoringConfig()
        self.client: Optional[Client] = None
        self._initialize_client()

    def _initialize_client(self):
        """Build the LangSmith client and verify connectivity.

        Probes the configured project so misconfiguration fails fast at
        startup instead of on the first trace call.

        Raises:
            MonitoringError: On any initialization or connectivity failure.
        """
        try:
            self.client = Client(
                api_key=self.config.api_key,
                api_url=self.config.endpoint
            )

            # Connectivity probe: raises if credentials/project are wrong.
            self.client.get_project(project_name=self.config.project_name)

            logger.info("LangSmith客户端初始化完成")

        except Exception as e:
            logger.error(f"LangSmith客户端初始化失败: {str(e)}")
            raise MonitoringError(f"LangSmith客户端初始化失败: {str(e)}")

    def start_trace(self, name: str, run_type: str, inputs: Dict[str, Any],
                   parent_run_id: Optional[str] = None) -> Optional[str]:
        """Start a trace and return its run id.

        Args:
            name: Human-readable run name.
            run_type: LangSmith run type (e.g. "chain", "tool", "agent").
            inputs: Input payload recorded on the run.
            parent_run_id: Optional id of the enclosing run, for nesting.

        Returns:
            The new run id, or ``None`` when tracing is disabled, the run
            is sampled out, or creation fails (errors are logged, not
            raised, so tracing problems never break the traced code).
        """
        if not self.config.enable_tracing:
            return None

        # Honor the configured sample rate by recording only a fraction of
        # traces. The default of 1.0 preserves the original always-on
        # behavior; this makes the previously dead config field effective.
        if self.config.sample_rate < 1.0 and random.random() >= self.config.sample_rate:
            return None

        try:
            run_id = str(uuid.uuid4())

            # We key follow-up calls on the id we generated ourselves;
            # create_run's return value is None in some SDK versions, so
            # it is deliberately not used.
            self.client.create_run(
                name=name,
                run_type=run_type,
                inputs=inputs,
                project_name=self.config.project_name,
                id=run_id,
                parent_run_id=parent_run_id
            )

            logger.debug(f"追踪开始: run_id={run_id}, name={name}")
            return run_id

        except Exception as e:
            logger.error(f"开始追踪失败: {str(e)}")
            return None

    def end_trace(self, run_id: str, outputs: Optional[Dict[str, Any]] = None,
                  error: Optional[str] = None, end_time: Optional[datetime] = None):
        """Finish a trace previously opened with :meth:`start_trace`.

        Args:
            run_id: Id returned by ``start_trace``; falsy values are a no-op
                (covers the disabled-tracing / sampled-out cases).
            outputs: Output payload to record on the run.
            error: Error description when the traced unit failed.
            end_time: Completion time; defaults to the current UTC time.
        """
        if not self.config.enable_tracing or not run_id:
            return

        try:
            self.client.update_run(
                run_id=run_id,
                outputs=outputs,
                error=error,
                # Timezone-aware now(); datetime.utcnow() is deprecated and
                # produced naive timestamps.
                end_time=end_time or datetime.now(timezone.utc)
            )

            logger.debug(f"追踪结束: run_id={run_id}")

        except Exception as e:
            logger.error(f"结束追踪失败: {str(e)}")

    def log_event(self, event_name: str, event_data: Dict[str, Any],
                  run_id: Optional[str] = None, timestamp: Optional[datetime] = None):
        """Record a structured monitoring event.

        The assembled ``event`` dict is the payload a future exporter would
        forward to LangSmith or another sink; today it is only logged
        locally, matching the original behavior.
        """
        try:
            event = {
                "event_name": event_name,
                "event_data": event_data,
                "timestamp": timestamp or datetime.now(timezone.utc),
                "run_id": run_id
            }

            # TODO: forward ``event`` to LangSmith or an external monitoring
            # system; currently only the event name is logged.
            logger.info(f"事件记录: {event_name}")

        except Exception as e:
            logger.error(f"记录事件失败: {str(e)}")

    def log_agent_execution(self, agent_name: str, task_id: str,
                          inputs: Dict[str, Any], outputs: Optional[Dict[str, Any]] = None,
                          error: Optional[str] = None, execution_time: Optional[float] = None):
        """Record a completed agent execution as a single trace.

        Opens a run named ``agent_<agent_name>``, attaches agent metrics as
        an event, then closes the run with the given outputs/error. Errors
        are logged, never raised.
        """
        try:
            run_id = self.start_trace(
                name=f"agent_{agent_name}",
                run_type="agent",
                inputs=inputs
            )

            if run_id:
                # Agent-specific metrics attached alongside the trace.
                metrics = {
                    "agent_name": agent_name,
                    "task_id": task_id,
                    "execution_time": execution_time,
                    "status": "success" if not error else "failed"
                }

                self.log_event("agent_execution", metrics, run_id)

                self.end_trace(run_id, outputs, error)

        except Exception as e:
            logger.error(f"记录智能体执行失败: {str(e)}")

    def log_tool_usage(self, tool_name: str, inputs: Dict[str, Any],
                      outputs: Optional[Dict[str, Any]] = None,
                      error: Optional[str] = None, execution_time: Optional[float] = None):
        """Record a completed tool invocation as a single trace.

        Same pattern as :meth:`log_agent_execution`, with a run named
        ``tool_<tool_name>`` and run type "tool".
        """
        try:
            run_id = self.start_trace(
                name=f"tool_{tool_name}",
                run_type="tool",
                inputs=inputs
            )

            if run_id:
                # Tool-specific metrics attached alongside the trace.
                metrics = {
                    "tool_name": tool_name,
                    "execution_time": execution_time,
                    "status": "success" if not error else "failed"
                }

                self.log_event("tool_usage", metrics, run_id)

                self.end_trace(run_id, outputs, error)

        except Exception as e:
            logger.error(f"记录工具使用失败: {str(e)}")

    def log_workflow_execution(self, workflow_name: str, workflow_id: str,
                             inputs: Dict[str, Any], outputs: Optional[Dict[str, Any]] = None,
                             error: Optional[str] = None, execution_time: Optional[float] = None):
        """Record a completed workflow execution as a single trace.

        Uses run type "chain" and additionally records ``total_steps`` when
        present in ``outputs``.
        """
        try:
            run_id = self.start_trace(
                name=f"workflow_{workflow_name}",
                run_type="chain",
                inputs=inputs
            )

            if run_id:
                # Workflow-specific metrics attached alongside the trace.
                metrics = {
                    "workflow_name": workflow_name,
                    "workflow_id": workflow_id,
                    "execution_time": execution_time,
                    "status": "success" if not error else "failed",
                    "total_steps": outputs.get("total_steps", 0) if outputs else 0
                }

                self.log_event("workflow_execution", metrics, run_id)

                self.end_trace(run_id, outputs, error)

        except Exception as e:
            logger.error(f"记录工作流执行失败: {str(e)}")

    def log_performance_metrics(self, metric_name: str, metric_value: float,
                              labels: Optional[Dict[str, str]] = None,
                              timestamp: Optional[datetime] = None):
        """Record a single performance metric as a monitoring event."""
        try:
            metrics_data = {
                "metric_name": metric_name,
                "metric_value": metric_value,
                "labels": labels or {},
                "timestamp": timestamp or datetime.now(timezone.utc)
            }

            self.log_event("performance_metrics", metrics_data)

        except Exception as e:
            logger.error(f"记录性能指标失败: {str(e)}")

    def log_error(self, error_type: str, error_message: str,
                 context: Optional[Dict[str, Any]] = None,
                 run_id: Optional[str] = None):
        """Record an error occurrence as a monitoring event and log it."""
        try:
            error_data = {
                "error_type": error_type,
                "error_message": error_message,
                "context": context or {},
                "timestamp": datetime.now(timezone.utc)
            }

            self.log_event("error", error_data, run_id)

            logger.error(f"错误记录: {error_type} - {error_message}")

        except Exception as e:
            logger.error(f"记录错误失败: {str(e)}")

    def create_evaluation_dataset(self, dataset_name: str, description: str) -> str:
        """Create an evaluation dataset and return its id.

        Raises:
            MonitoringError: If dataset creation fails.
        """
        try:
            dataset = self.client.create_dataset(
                dataset_name=dataset_name,
                description=description
            )

            logger.info(f"评估数据集创建完成: {dataset_name}")
            return dataset.id

        except Exception as e:
            logger.error(f"创建评估数据集失败: {str(e)}")
            raise MonitoringError(f"创建评估数据集失败: {str(e)}")

    def add_evaluation_example(self, dataset_id: str, inputs: Dict[str, Any],
                             outputs: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None):
        """Append one input/output example to an evaluation dataset.

        Errors are logged, never raised, so bulk loading continues.
        """
        try:
            self.client.create_example(
                inputs=inputs,
                outputs=outputs,
                dataset_id=dataset_id,
                metadata=metadata or {}
            )

            logger.debug(f"评估示例添加完成到数据集: {dataset_id}")

        except Exception as e:
            logger.error(f"添加评估示例失败: {str(e)}")

    def run_evaluation(self, dataset_id: str, run_name: str,
                      evaluator_config: Dict[str, Any]) -> str:
        """Run an evaluation over a dataset and return the evaluation id.

        NOTE(review): the exact ``Client.evaluate`` signature varies across
        langsmith SDK versions — confirm these keyword arguments against
        the installed SDK.

        Raises:
            MonitoringError: If the evaluation fails to run.
        """
        try:
            evaluation = self.client.evaluate(
                dataset_id=dataset_id,
                run_name=run_name,
                evaluators=[evaluator_config]
            )

            logger.info(f"评估运行完成: {run_name}")
            return evaluation.id

        except Exception as e:
            logger.error(f"运行评估失败: {str(e)}")
            raise MonitoringError(f"运行评估失败: {str(e)}")

    def get_run_stats(self, run_id: str) -> Dict[str, Any]:
        """Fetch a single run and return a flat stats dict.

        Returns:
            A dict of run attributes, or ``{}`` when the lookup fails.
        """
        try:
            run = self.client.read_run(run_id)

            stats = {
                "run_id": run_id,
                "name": run.name,
                "run_type": run.run_type,
                "status": run.status,
                "start_time": run.start_time.isoformat() if run.start_time else None,
                "end_time": run.end_time.isoformat() if run.end_time else None,
                "error": run.error,
                "inputs": run.inputs,
                "outputs": run.outputs,
                "feedback_stats": run.feedback_stats
            }

            return stats

        except Exception as e:
            logger.error(f"获取运行统计失败: {str(e)}")
            return {}

    def get_project_stats(self, project_name: Optional[str] = None) -> Dict[str, Any]:
        """Aggregate success/failure counts and timing over recent runs.

        Args:
            project_name: Project to inspect; defaults to the configured one.

        Returns:
            A stats dict (counts, success rate, average execution time), or
            ``{}`` on failure. Based on at most the 100 most recent runs.
        """
        try:
            project_name = project_name or self.config.project_name

            # BUG FIX: list_runs returns an iterator in current SDK
            # versions; materialize it so len() works and the collection
            # can be iterated more than once below.
            runs = list(self.client.list_runs(project_name=project_name, limit=100))

            total_runs = len(runs)
            successful_runs = sum(1 for run in runs if run.error is None)
            failed_runs = total_runs - successful_runs

            # Average wall-clock duration over runs that have both
            # timestamps.
            execution_times = []
            for run in runs:
                if run.start_time and run.end_time:
                    execution_time = (run.end_time - run.start_time).total_seconds()
                    execution_times.append(execution_time)

            avg_execution_time = sum(execution_times) / len(execution_times) if execution_times else 0

            stats = {
                "project_name": project_name,
                "total_runs": total_runs,
                "successful_runs": successful_runs,
                "failed_runs": failed_runs,
                "success_rate": successful_runs / total_runs if total_runs > 0 else 0,
                "average_execution_time": avg_execution_time,
                "timestamp": datetime.now(timezone.utc).isoformat()
            }

            return stats

        except Exception as e:
            logger.error(f"获取项目统计失败: {str(e)}")
            return {}

    def export_traces(self, output_file: str, format_type: str = "json",
                     start_time: Optional[datetime] = None,
                     end_time: Optional[datetime] = None):
        """Export trace records in a time range to a file.

        Args:
            output_file: Destination path.
            format_type: Only "json" is supported (case-insensitive).
            start_time: Optional lower bound for the run window.
            end_time: Optional upper bound for the run window.

        Raises:
            MonitoringError: On any export failure, including an
                unsupported ``format_type``.
        """
        try:
            # Fetch runs in the requested window (at most 1000).
            runs = self.client.list_runs(
                project_name=self.config.project_name,
                start_time=start_time,
                end_time=end_time,
                limit=1000
            )

            traces = []
            for run in runs:
                trace = {
                    "run_id": str(run.id),
                    "name": run.name,
                    "run_type": run.run_type,
                    "status": "success" if run.error is None else "failed",
                    "error": run.error,
                    "start_time": run.start_time.isoformat() if run.start_time else None,
                    "end_time": run.end_time.isoformat() if run.end_time else None,
                    "inputs": run.inputs,
                    "outputs": run.outputs
                }
                traces.append(trace)

            # json is already imported at module level; the redundant local
            # import was removed.
            if format_type.lower() == "json":
                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(traces, f, ensure_ascii=False, indent=2)
            else:
                # Placeholder for additional export formats.
                raise ValueError(f"不支持的导出格式: {format_type}")

            logger.info(f"追踪数据导出完成: {output_file}, 共 {len(traces)} 条记录")

        except Exception as e:
            logger.error(f"导出追踪数据失败: {str(e)}")
            raise MonitoringError(f"导出追踪数据失败: {str(e)}")

    async def health_check(self) -> Dict[str, Any]:
        """Check LangSmith connectivity and report monitor status.

        Returns:
            A dict with ``status`` "healthy" (plus connection and config
            details) or "unhealthy" (plus the error); never raises.
        """
        try:
            # Connectivity probe against the configured project.
            project = self.client.get_project(project_name=self.config.project_name)

            return {
                "status": "healthy",
                "langsmith": {
                    "status": "connected",
                    "project_name": self.config.project_name,
                    "project_id": project.id
                },
                "config": {
                    "tracing_enabled": self.config.enable_tracing,
                    "evaluations_enabled": self.config.enable_evaluations,
                    "sample_rate": self.config.sample_rate
                },
                "timestamp": datetime.now(timezone.utc).isoformat()
            }

        except Exception as e:
            logger.error(f"健康检查失败: {str(e)}")
            return {
                "status": "unhealthy",
                "error": str(e),
                "timestamp": datetime.now(timezone.utc).isoformat()
            }

# Process-wide monitor singleton, built lazily on first access.
langsmith_monitor = None


async def get_langsmith_monitor() -> LangSmithMonitor:
    """Return the shared :class:`LangSmithMonitor`, creating it on demand.

    The check-then-assign is not lock-protected, but there is no ``await``
    between the check and the assignment, so it cannot be interleaved
    within a single asyncio event loop.
    """
    global langsmith_monitor
    if langsmith_monitor is not None:
        return langsmith_monitor
    langsmith_monitor = LangSmithMonitor()
    return langsmith_monitor