# encoding: utf-8
# @File  : monitor.py
# @Author: shaoyun
# @Date  :  2025/05/11
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Any

from loguru import logger
from prometheus_client import Counter, Gauge, Histogram, push_to_gateway, CollectorRegistry
from sqlalchemy.orm import Session

from app.core.config import settings
from app.db.mongo_manager import MongoDB
from app.models.node import Node, NodeStatus
from app.models.task import Task, TaskStatus


class Monitor:
    """Performance monitoring module.

    Collects task/node metrics into a dedicated Prometheus registry,
    optionally pushes them to a Pushgateway, evaluates alert conditions
    (persisting de-duplicated alerts to MongoDB), and builds an aggregated
    system-metrics snapshot for the API layer.

    NOTE(review): naive UTC timestamps (``datetime.utcnow()``) are assumed
    throughout and compared against DB columns — presumably those columns
    are also stored as naive UTC; confirm before switching to aware datetimes.
    """

    # A dedicated registry avoids "duplicated timeseries" errors if this
    # module is imported more than once against the global default registry.
    registry = CollectorRegistry()

    # --- Prometheus metric definitions -------------------------------------
    task_gauge = Gauge(
        'crawler_tasks_current',
        'Current number of crawler tasks by status',
        ['status'],
        registry=registry
    )

    task_total_counter = Counter(
        'crawler_tasks_total',
        'Total number of crawler tasks created',
        ['status'],
        registry=registry
    )

    node_gauge = Gauge(
        'crawler_nodes',
        'Number of crawler nodes',
        ['status'],
        registry=registry
    )

    cpu_usage_gauge = Gauge(
        'crawler_node_cpu_usage',
        'CPU usage of crawler nodes',
        ['node_id', 'node_name'],
        registry=registry
    )

    memory_usage_gauge = Gauge(
        'crawler_node_memory_usage',
        'Memory usage of crawler nodes',
        ['node_id', 'node_name'],
        registry=registry
    )

    task_duration = Histogram(
        'crawler_task_duration_seconds',
        'Task execution duration in seconds',
        ['spider_id'],
        buckets=(1, 5, 10, 30, 60, 120, 300, 600, 1800, 3600),
        registry=registry
    )

    task_queue_size = Gauge(
        'crawler_task_queue_size',
        'Size of task queue',
        registry=registry
    )

    active_tasks = Gauge(
        'crawler_active_tasks',
        'Number of currently running tasks',
        registry=registry
    )

    # Pushgateway export is enabled only when a port is configured.
    ENABLE_PROMETHEUS_PUSH = settings.PUSHGATEWAY_PORT is not None

    @staticmethod
    async def _get_queue_size() -> int:
        """Return the current Redis task-queue size, best effort.

        Returns:
            The queue length, or 0 when Redis is unreachable or the lookup
            fails for any reason (logged at debug level so monitoring keeps
            running without Redis).
        """
        try:
            # Imported lazily so this module does not hard-depend on Redis
            # at import time.
            from app.db.redis_manager import RedisClient
            return await RedisClient.get_queue_size()
        except Exception as e:
            logger.debug(f"Could not get queue size: {e}")
            return 0

    @staticmethod
    async def collect_metrics(db: Session):
        """Collect system metrics and push them to Prometheus.

        Args:
            db: An open SQLAlchemy session; the caller owns its lifecycle.

        Never raises — any failure is logged so the monitoring loop
        keeps running.
        """
        try:
            # Per-status task counts; RUNNING also feeds the active-tasks gauge.
            for status in TaskStatus:
                count = db.query(Task).filter(Task.status == status).count()
                Monitor.task_gauge.labels(status=status.value).set(count)

                if status == TaskStatus.RUNNING:
                    Monitor.active_tasks.set(count)

            # Per-status node counts.
            for status in NodeStatus:
                count = db.query(Node).filter(Node.status == status).count()
                Monitor.node_gauge.labels(status=status.value).set(count)

            # Resource usage of online nodes (a NULL usage reads as 0).
            nodes = db.query(Node).filter(Node.status == NodeStatus.ONLINE).all()
            for node in nodes:
                Monitor.cpu_usage_gauge.labels(
                    node_id=node.id,
                    node_name=node.name
                ).set(node.cpu_usage or 0)

                Monitor.memory_usage_gauge.labels(
                    node_id=node.id,
                    node_name=node.name
                ).set(node.memory_usage or 0)

            # Durations of tasks that completed within the last hour.
            one_hour_ago = datetime.utcnow() - timedelta(hours=1)
            recent_completed_tasks = db.query(Task).filter(
                Task.status == TaskStatus.COMPLETED,
                Task.started_at.isnot(None),
                Task.finished_at.isnot(None),
                Task.finished_at >= one_hour_ago
            ).all()

            for task in recent_completed_tasks:
                duration = (task.finished_at - task.started_at).total_seconds()
                Monitor.task_duration.labels(spider_id=task.spider_id).observe(duration)

            # Queue size from Redis (0 when unavailable).
            Monitor.task_queue_size.set(await Monitor._get_queue_size())

            # Push to the Prometheus Pushgateway when configured.
            if Monitor.ENABLE_PROMETHEUS_PUSH:
                push_gateway_url = f"http://localhost:{settings.PUSHGATEWAY_PORT}"
                try:
                    push_to_gateway(
                        push_gateway_url,
                        job="crawler_manager",
                        registry=Monitor.registry
                    )
                    logger.debug("Metrics pushed to Prometheus Pushgateway")
                except Exception as e:
                    # Push failures are non-fatal: metrics stay in the local
                    # registry and will be pushed on the next cycle.
                    logger.debug(f"Could not push metrics to Prometheus: {e}")
            else:
                logger.debug("Prometheus push disabled, metrics collected locally")

        except Exception as e:
            logger.error(f"Error collecting metrics: {e}")

    @staticmethod
    async def check_alerts(db: Session) -> List[Dict[str, Any]]:
        """Evaluate alert conditions and persist new alerts.

        Checks offline nodes, CPU/memory thresholds, the task failure rate
        over the last hour, and queue backlog. Alerts not already stored in
        MongoDB within the last 10 minutes (by type) are inserted.

        Args:
            db: An open SQLAlchemy session; the caller owns its lifecycle.

        Returns:
            The list of alert dicts raised in this pass (empty on error).
        """
        alerts = []

        try:
            # Offline-node alert.
            offline_nodes = db.query(Node).filter(Node.status == NodeStatus.OFFLINE).all()
            if offline_nodes:
                alerts.append({
                    "level": "warning",
                    "type": "node_offline",
                    "message": f"{len(offline_nodes)} 个节点离线",
                    "details": [{"node_id": node.id, "name": node.name} for node in offline_nodes],
                    "timestamp": datetime.utcnow().isoformat()
                })

            # High-CPU alert (threshold from settings).
            high_cpu_nodes = db.query(Node).filter(
                Node.status == NodeStatus.ONLINE,
                Node.cpu_usage > settings.CPU_THRESHOLD
            ).all()

            if high_cpu_nodes:
                alerts.append({
                    "level": "warning",
                    "type": "high_cpu_usage",
                    "message": f"{len(high_cpu_nodes)} 个节点CPU使用率过高",
                    "details": [
                        {"node_id": node.id, "name": node.name, "cpu_usage": node.cpu_usage}
                        for node in high_cpu_nodes
                    ],
                    "timestamp": datetime.utcnow().isoformat()
                })

            # High-memory alert (threshold from settings).
            high_memory_nodes = db.query(Node).filter(
                Node.status == NodeStatus.ONLINE,
                Node.memory_usage > settings.MEMORY_THRESHOLD
            ).all()

            if high_memory_nodes:
                alerts.append({
                    "level": "warning",
                    "type": "high_memory_usage",
                    "message": f"{len(high_memory_nodes)} 个节点内存使用率过高",
                    "details": [
                        {"node_id": node.id, "name": node.name, "memory_usage": node.memory_usage}
                        for node in high_memory_nodes
                    ],
                    "timestamp": datetime.utcnow().isoformat()
                })

            # Failure-rate alert over the last hour. Only fires with a
            # meaningful sample (>10 tasks) and a rate above 10%.
            one_hour_ago = datetime.utcnow() - timedelta(hours=1)

            total_tasks = db.query(Task).filter(
                Task.created_at >= one_hour_ago
            ).count()

            failed_tasks = db.query(Task).filter(
                Task.status == TaskStatus.FAILED,
                Task.created_at >= one_hour_ago
            ).count()

            if total_tasks > 10:
                failure_rate = failed_tasks / total_tasks
                if failure_rate > 0.1:
                    alerts.append({
                        "level": "error",
                        "type": "high_failure_rate",
                        "message": f"任务失败率过高: {failed_tasks}/{total_tasks} ({failure_rate:.1%})",
                        "details": {
                            "total_tasks": total_tasks,
                            "failed_tasks": failed_tasks,
                            "failure_rate": failure_rate
                        },
                        "timestamp": datetime.utcnow().isoformat()
                    })

            # Queue-backlog alert (size 0 on Redis failure never fires).
            queue_size = await Monitor._get_queue_size()
            if queue_size > 100:
                alerts.append({
                    "level": "warning",
                    "type": "queue_backlog",
                    "message": f"任务队列堆积: {queue_size} 个任务等待处理",
                    "details": {"queue_size": queue_size},
                    "timestamp": datetime.utcnow().isoformat()
                })

            # Persist new alerts, de-duplicating by type over 10 minutes.
            if alerts:
                for alert in alerts:
                    existing = await MongoDB.find_one(
                        "alerts",
                        {
                            "type": alert["type"],
                            "timestamp": {"$gte": (datetime.utcnow() - timedelta(minutes=10)).isoformat()}
                        }
                    )
                    if not existing:
                        await MongoDB.insert_one("alerts", alert)
                        logger.warning(f"告警生成: {alert['type']} - {alert['message']}")

            return alerts

        except Exception as e:
            logger.error(f"Error checking alerts: {e}")
            return []

    @staticmethod
    async def get_system_metrics(db: Session, time_range: str = "hour") -> Dict[str, Any]:
        """Build an aggregated snapshot of system metrics.

        Args:
            db: An open SQLAlchemy session; the caller owns its lifecycle.
            time_range: One of "hour", "day", "week" (anything else falls
                back to "hour").

        Returns:
            A dict with task/node/performance stats, a 0-100 health score,
            and recent alerts — or ``{"error": ...}`` on failure.
        """
        try:
            now = datetime.utcnow()

            # Resolve the window start; unknown ranges default to one hour.
            if time_range == "hour":
                start_time = now - timedelta(hours=1)
            elif time_range == "day":
                start_time = now - timedelta(days=1)
            elif time_range == "week":
                start_time = now - timedelta(weeks=1)
            else:
                start_time = now - timedelta(hours=1)

            total_tasks = db.query(Task).filter(Task.created_at >= start_time).count()

            # Task counts per status within the window.
            task_stats = {}
            for status in TaskStatus:
                count = db.query(Task).filter(
                    Task.status == status,
                    Task.created_at >= start_time
                ).count()
                task_stats[status.value] = count

            # Node counts per status (current, not windowed).
            node_stats = {}
            for status in NodeStatus:
                count = db.query(Node).filter(Node.status == status).count()
                node_stats[status.value] = count

            # Execution-time stats for tasks completed within the window.
            completed_tasks = db.query(Task).filter(
                Task.status == TaskStatus.COMPLETED,
                Task.started_at.isnot(None),
                Task.finished_at.isnot(None),
                Task.created_at >= start_time
            ).all()

            execution_times = [
                (task.finished_at - task.started_at).total_seconds()
                for task in completed_tasks
            ]

            avg_execution_time = sum(execution_times) / len(execution_times) if execution_times else 0
            min_execution_time = min(execution_times) if execution_times else 0
            max_execution_time = max(execution_times) if execution_times else 0

            # Average resource usage across online nodes.
            online_nodes = db.query(Node).filter(Node.status == NodeStatus.ONLINE).all()

            avg_cpu_usage = sum(node.cpu_usage or 0 for node in online_nodes) / len(online_nodes) if online_nodes else 0
            avg_memory_usage = sum(node.memory_usage or 0 for node in online_nodes) / len(
                online_nodes) if online_nodes else 0

            # Queue size from Redis (0 when unavailable).
            queue_size = await Monitor._get_queue_size()

            # Most recent alerts in the window, newest first.
            recent_alerts = await MongoDB.find_many(
                "alerts",
                {"timestamp": {"$gte": start_time.isoformat()}},
                limit=10
            )

            recent_alerts = sorted(
                recent_alerts,
                key=lambda x: x.get('timestamp', ''),
                reverse=True
            )

            # Health score: start at 100 and subtract penalties, floored at 0.
            health_score = 100

            failed_tasks_count = task_stats.get("failed", 0)
            if failed_tasks_count > 0:
                # 5 points per failed task, capped at 30.
                health_score -= min(30, failed_tasks_count * 5)

            offline_node_count = node_stats.get("offline", 0)
            if offline_node_count > 0:
                health_score -= offline_node_count * 10

            if avg_cpu_usage > 80:
                health_score -= 20

            if avg_memory_usage > 80:
                health_score -= 20

            health_score = max(0, health_score)

            return {
                "time_range": time_range,
                "start_time": start_time.isoformat(),
                "end_time": now.isoformat(),
                "health_score": health_score,
                "tasks": {
                    "total": total_tasks,
                    "by_status": task_stats,
                    "queue_size": queue_size
                },
                "nodes": {
                    "total": sum(node_stats.values()),
                    "by_status": node_stats,
                    "avg_cpu_usage": round(avg_cpu_usage, 2),
                    "avg_memory_usage": round(avg_memory_usage, 2)
                },
                "performance": {
                    "avg_execution_time": round(avg_execution_time, 2),
                    "min_execution_time": round(min_execution_time, 2),
                    "max_execution_time": round(max_execution_time, 2),
                    "task_success_rate": round(task_stats.get("completed", 0) / total_tasks if total_tasks else 0, 2)
                },
                "alerts": recent_alerts
            }

        except Exception as e:
            logger.error(f"Error getting system metrics: {e}")
            return {"error": str(e)}

    @staticmethod
    async def run_monitoring_loop():
        """Run the periodic monitoring loop forever.

        Collects metrics and checks alerts once per minute; on error,
        backs off for 10 seconds and retries. Intended to be launched
        as a background asyncio task.
        """
        # Imported lazily to avoid a circular import at module load time.
        from app.db.mysql import SessionLocal

        logger.info("Starting monitoring loop...")

        while True:
            try:
                db = SessionLocal()
                try:
                    # Collect metrics.
                    await Monitor.collect_metrics(db)

                    # Check alerts.
                    await Monitor.check_alerts(db)
                finally:
                    # Always release the session — even if a step raises —
                    # so DB connections are not leaked from the pool.
                    db.close()

                # Run once per minute.
                await asyncio.sleep(60)

            except Exception as e:
                logger.error(f"Error in monitoring loop: {e}")
                await asyncio.sleep(10)  # brief back-off before retrying