import time
import psutil
import socket
from typing import Dict, List, Any, Optional
import logging
from datetime import datetime
import json
from pathlib import Path
import requests
from concurrent.futures import ThreadPoolExecutor
import traceback
from system_integration import CareerDevelopmentSystem

# Configure module-wide logging (timestamped, named-logger format)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("SystemMonitor")

class SystemMonitor:
    """System monitoring and operations management.

    Wraps a CareerDevelopmentSystem and provides:
      * periodic health checks of registered components,
      * host resource metric collection (persisted as JSONL under b/monitoring),
      * threshold-based alerting with best-effort automatic recovery,
      * summary report generation over a time window.
    """

    def __init__(self, system: "CareerDevelopmentSystem"):
        """
        Args:
            system: the career-development system instance to monitor.
        """
        self.system = system
        # BUGFIX: pre-initialize the loop flag so `monitor.running = False`
        # is safe even before start_monitoring() has run (it was previously
        # only created inside start_monitoring).
        self.running = False
        self.monitor_interval = 60  # seconds between monitoring passes
        self.alert_thresholds = {
            "cpu": 90,            # CPU usage threshold (%)
            "memory": 85,         # memory usage threshold (%)
            "disk": 90,           # disk usage threshold (%)
            "response_time": 5    # service response-time threshold (seconds)
        }
        # Traffic-light health snapshot, refreshed by _perform_health_checks().
        self.health_status = {
            "overall": "green",
            "components": {},
            "last_checked": None
        }
        self._init_monitoring()

    def _init_monitoring(self):
        """Create the monitoring directory and register component probes."""
        self.monitor_dir = Path("b/monitoring")
        self.monitor_dir.mkdir(parents=True, exist_ok=True)

        # One health-check callable per component, plus the metric names
        # that component is expected to report.
        self.endpoints = {
            "resume_parser": {
                "health_check": self._check_resume_parser,
                "metrics": ["processing_time", "success_rate"]
            },
            "kafka_pipeline": {
                "health_check": self._check_kafka_pipeline,
                "metrics": ["lag", "throughput"]
            },
            "ray_cluster": {
                "health_check": self._check_ray_cluster,
                "metrics": ["nodes", "task_load"]
            }
        }

        logger.info("系统监控初始化完成")

    def start_monitoring(self):
        """Run the blocking monitoring loop until ``self.running`` is cleared."""
        logger.info("启动系统监控服务...")
        self.running = True

        while self.running:
            try:
                self._perform_health_checks()
                self._collect_system_metrics()
                self._check_alert_conditions()
                self._save_monitoring_data()
                time.sleep(self.monitor_interval)
            except Exception as e:
                logger.error(f"监控循环出错: {str(e)}", exc_info=True)
                time.sleep(10)  # back off briefly before retrying

    def _perform_health_checks(self):
        """Probe every registered component and refresh ``health_status``."""
        checks = {}

        # Host-level resources first.
        checks["system_resources"] = self._check_system_resources()

        # Component probes; a failing probe must never kill the loop.
        for name, config in self.endpoints.items():
            try:
                checks[name] = config["health_check"]()
            except Exception as e:
                checks[name] = {
                    "status": "red",
                    "error": str(e),
                    "stacktrace": traceback.format_exc()
                }

        self.health_status["components"] = checks
        self.health_status["last_checked"] = datetime.now().isoformat()

        # Worst component status wins. Use .get() so a probe returning a
        # dict without a "status" key cannot raise KeyError here.
        statuses = [c.get("status") for c in checks.values()]
        if "red" in statuses:
            self.health_status["overall"] = "red"
        elif "yellow" in statuses:
            self.health_status["overall"] = "yellow"
        else:
            self.health_status["overall"] = "green"

    def _check_system_resources(self) -> Dict[str, Any]:
        """Sample CPU/memory/disk usage and classify against thresholds."""
        cpu_percent = psutil.cpu_percent(interval=1)  # blocks ~1s to sample
        mem = psutil.virtual_memory()
        disk = psutil.disk_usage("/")

        over_threshold = (
            cpu_percent > self.alert_thresholds["cpu"]
            or mem.percent > self.alert_thresholds["memory"]
            or disk.percent > self.alert_thresholds["disk"]
        )
        return {
            "cpu_usage": cpu_percent,
            "mem_usage": mem.percent,
            "disk_usage": disk.percent,
            "status": "red" if over_threshold else "green",
            "timestamp": datetime.now().isoformat()
        }

    def _check_resume_parser(self) -> Dict[str, Any]:
        """Smoke-test the resume parsing service with a sample file."""
        test_file = Path("b/data/resumes/sample_resume.pdf")
        if not test_file.exists():
            # BUGFIX: the parent directory was never created (only
            # b/monitoring is), so write_text raised FileNotFoundError
            # on a fresh deployment.
            test_file.parent.mkdir(parents=True, exist_ok=True)
            test_file.write_text("示例简历内容\n技能: Python, SQL")

        start_time = time.time()
        try:
            result = self.system.resume_parser.parse_resume(str(test_file))
            processing_time = time.time() - start_time

            return {
                "status": "green",
                "processing_time": processing_time,
                "success": True,
                "response": result
            }
        except Exception as e:
            return {
                "status": "red",
                "error": str(e),
                "processing_time": time.time() - start_time
            }

    def _check_kafka_pipeline(self) -> Dict[str, Any]:
        """Check Kafka pipeline consumer lag; >100 total lag is "yellow"."""
        try:
            lag = self.system.kafka_pipeline._get_consumer_lag()
            total_lag = sum(lag.values())

            return {
                "status": "yellow" if total_lag > 100 else "green",
                "lag": total_lag,
                "partitions": lag
            }
        except Exception as e:
            return {
                "status": "red",
                "error": str(e)
            }

    def _check_ray_cluster(self) -> Dict[str, Any]:
        """Check Ray cluster status (node count and resource availability)."""
        try:
            # BUGFIX: `ray` was referenced without ever being imported in
            # this module, so every probe failed with NameError. Import
            # lazily so environments without Ray degrade to a "red" status
            # instead of masking the real cluster state.
            import ray

            if not ray.is_initialized():
                return {
                    "status": "red",
                    "error": "Ray未初始化"
                }

            nodes = ray.nodes()
            resources = ray.cluster_resources()

            return {
                "status": "green",
                "nodes": len(nodes),
                "resources": resources,
                "available": ray.available_resources()
            }
        except Exception as e:
            return {
                "status": "red",
                "error": str(e)
            }

    def _collect_system_metrics(self):
        """Sample host metrics and append them to today's JSONL metrics file."""
        metrics = {
            "timestamp": datetime.now().isoformat(),
            "cpu": psutil.cpu_percent(interval=1),
            "memory": psutil.virtual_memory().percent,
            "disk": psutil.disk_usage("/").percent,
            # BUGFIX: net_io_counters() is a namedtuple, which json.dumps
            # flattens into an anonymous array; _asdict() keeps field names.
            "network": psutil.net_io_counters()._asdict(),
            "processes": len(psutil.pids())
        }

        # One JSONL file per day; appended once per monitoring pass.
        metrics_file = self.monitor_dir / f"metrics_{datetime.now().strftime('%Y%m%d')}.json"
        with open(metrics_file, "a") as f:
            f.write(json.dumps(metrics) + "\n")

    def _check_alert_conditions(self):
        """Compare the latest health snapshot against thresholds and alert."""
        components = self.health_status["components"]
        resources = components.get("system_resources")
        if resources is None:
            # No health check has completed yet; nothing to evaluate.
            return

        # CPU alert
        if resources["cpu_usage"] > self.alert_thresholds["cpu"]:
            self._trigger_alert("high_cpu", resources["cpu_usage"])

        # Memory alert
        if resources["mem_usage"] > self.alert_thresholds["memory"]:
            self._trigger_alert("high_memory", resources["mem_usage"])

        # Component failures
        for name, check in components.items():
            if check.get("status") == "red":
                self._trigger_alert(f"component_failure:{name}", check)

    def _trigger_alert(self, alert_type: str, value: Any):
        """Persist an alert record, attempt recovery, and log a warning.

        Args:
            alert_type: e.g. "high_cpu" or "component_failure:<name>".
            value: the offending metric value or the failed check payload.
        """
        alert = {
            "type": alert_type,
            "value": value,
            "timestamp": datetime.now().isoformat(),
            "system": self.system.system_id
        }

        # Append to the JSONL alert log.
        alert_file = self.monitor_dir / "alerts.json"
        with open(alert_file, "a") as f:
            f.write(json.dumps(alert) + "\n")

        # Try automatic remediation.
        self._attempt_recovery(alert_type)

        logger.warning(f"触发警报: {alert_type} = {value}")

    def _attempt_recovery(self, alert_type: str):
        """Best-effort automatic remediation; must never raise to the caller."""
        try:
            if alert_type == "high_cpu":
                # Shed load by shrinking the worker pool (floor of 2).
                # NOTE(review): this pokes ThreadPoolExecutor's private
                # _max_workers attribute, as the original did — confirm this
                # is intentional and effective for the executor in use.
                self.system.executor._max_workers = max(
                    2, self.system.executor._max_workers - 2
                )
                logger.info(f"减少工作线程到 {self.system.executor._max_workers}")

            elif alert_type.startswith("component_failure"):
                # Restart the failed component.
                component = alert_type.split(":", 1)[1]
                logger.info(f"尝试重启组件: {component}")

                if component == "resume_parser":
                    # BUGFIX: the original referenced `ResumeParser`, a name
                    # that is never imported in this module (NameError).
                    # Rebuild from the live instance's own class instead.
                    parser_cls = type(self.system.resume_parser)
                    self.system.resume_parser = parser_cls(self.system.kg)
                elif component == "kafka_pipeline":
                    self.system.kafka_pipeline._shutdown()
                    self.system.kafka_pipeline.start()
        except Exception:
            # Recovery is best-effort; log and let the next cycle retry.
            logger.error(f"自动恢复失败: {alert_type}", exc_info=True)

    def _save_monitoring_data(self):
        """Persist the current health snapshot to status.json."""
        status_file = self.monitor_dir / "status.json"
        with open(status_file, "w") as f:
            json.dump(self.health_status, f, indent=2)

    def generate_report(self, hours: int = 24) -> Dict[str, Any]:
        """Summarize metrics and alerts over the last *hours* hours.

        Args:
            hours: size of the reporting window in hours.

        Returns:
            Report dict with averaged CPU/memory usage, alert counts, and up
            to 10 recent alert records under "details".
        """
        report = {
            "time_range": f"last_{hours}_hours",
            "generated_at": datetime.now().isoformat(),
            "system_id": self.system.system_id,
            "summary": {
                "alerts": 0,
                "incidents": 0,
                "avg_cpu": 0,
                "avg_memory": 0
            },
            "details": []
        }

        # Newest metrics file first (names embed YYYYMMDD, so they sort).
        metrics_files = sorted(
            self.monitor_dir.glob("metrics_*.json"),
            key=lambda x: x.name,
            reverse=True
        )

        if metrics_files:
            with open(metrics_files[0], "r") as f:
                # One sample per 60s monitoring pass -> 60 samples per hour.
                lines = f.readlines()[-hours * 60:]

            cpu_sum = 0
            mem_sum = 0
            count = 0
            for line in lines:
                try:
                    data = json.loads(line)
                    cpu_sum += data["cpu"]
                    mem_sum += data["memory"]
                    count += 1
                except (json.JSONDecodeError, KeyError, TypeError):
                    # Skip malformed or partial lines (original used a bare
                    # except, which also swallowed KeyboardInterrupt).
                    continue

            if count > 0:
                report["summary"]["avg_cpu"] = round(cpu_sum / count, 1)
                report["summary"]["avg_memory"] = round(mem_sum / count, 1)

        # Count alerts within the window and include the first few details.
        alert_file = self.monitor_dir / "alerts.json"
        if alert_file.exists():
            with open(alert_file, "r") as f:
                alerts = [json.loads(line) for line in f.readlines()]
            recent_alerts = [
                a for a in alerts
                if (datetime.now() - datetime.fromisoformat(a["timestamp"])).total_seconds() <= hours * 3600
            ]
            report["summary"]["alerts"] = len(recent_alerts)
            report["details"].extend(recent_alerts[:10])  # cap detail entries

        return report

if __name__ == "__main__":
    # Example usage: build the system the monitor will observe.
    system = CareerDevelopmentSystem({
        "kafka_config": {
            "bootstrap_servers": "localhost:9092",
            "topic": "career_events"
        }
    })
    
    monitor = SystemMonitor(system)
    
    # Run the monitoring loop in a background daemon thread so the
    # main thread stays free for report generation.
    import threading
    monitor_thread = threading.Thread(
        target=monitor.start_monitoring,
        daemon=True
    )
    monitor_thread.start()
    
    # Generate a sample report.
    print("\n生成监控报告...")
    report = monitor.generate_report(1)  # last 1 hour
    print(f"系统状态: {monitor.health_status['overall']}")
    print(f"平均CPU使用率: {report['summary']['avg_cpu']}%")
    print(f"警报数量: {report['summary']['alerts']}")
    
    # Keep the process alive until Ctrl-C, then shut down cleanly.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        monitor.running = False
        system.shutdown()