import psutil
import paramiko
import socket
import logging
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from celery import shared_task
from .models import HostMetrics, ProcessInfo, ServiceStatus, AlertRule
from cmdb.models import Host

logger = logging.getLogger(__name__)


@shared_task(bind=True, max_retries=3)
def collect_host_metrics(self, host_id):
    """Collect monitoring metrics for a single host over SSH.

    Runs a series of shell commands on the remote host to sample CPU usage,
    load averages, memory, disk, network counters, process counts and uptime,
    stores the result as one HostMetrics row, and marks the host online with
    a fresh heartbeat.

    Args:
        host_id: Primary key of the cmdb Host to sample. The host must be
            active with monitoring enabled, otherwise the task is a no-op.

    On collection failure the host status is flipped to 'offline' and the
    task retries up to ``max_retries`` times with a 60s countdown.
    """
    try:
        host = Host.objects.get(id=host_id, is_active=True, monitor_enabled=True)

        # Set up the SSH client; unknown host keys are accepted automatically.
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            # Connect using password or private-key auth per host config.
            if host.ssh_auth_type == 'password':
                password = host.decrypt_password()
                ssh_client.connect(
                    hostname=host.ip,
                    port=host.ssh_port,
                    username=host.ssh_username,
                    password=password,
                    timeout=settings.SSH_CONNECTION_TIMEOUT
                )
            else:
                private_key = host.decrypt_private_key()
                if private_key:
                    from io import StringIO
                    key = paramiko.RSAKey.from_private_key(StringIO(private_key))
                    ssh_client.connect(
                        hostname=host.ip,
                        port=host.ssh_port,
                        username=host.ssh_username,
                        pkey=key,
                        timeout=settings.SSH_CONNECTION_TIMEOUT
                    )
                else:
                    raise Exception("SSH私钥未配置")

            # Accumulate all sampled values, then persist them in one create().
            metrics_data = {}

            # CPU usage: user-CPU percentage from top's summary line.
            # NOTE(review): the field position in top's output varies between
            # procps versions — confirm against the target distro.
            stdin, stdout, stderr = ssh_client.exec_command(
                "top -bn1 | grep 'Cpu(s)' | awk '{print $2}' | cut -d'%' -f1"
            )
            cpu_usage = float(stdout.read().decode().strip())
            metrics_data['cpu_usage'] = cpu_usage

            # Load averages (1/5/15 minutes) parsed from uptime's tail.
            stdin, stdout, stderr = ssh_client.exec_command("uptime")
            uptime_output = stdout.read().decode().strip()
            load_parts = uptime_output.split('load average:')[1].strip().split(',')
            metrics_data['cpu_load_1m'] = float(load_parts[0].strip())
            metrics_data['cpu_load_5m'] = float(load_parts[1].strip())
            metrics_data['cpu_load_15m'] = float(load_parts[2].strip())

            # Memory in bytes from the "Mem:" row of free -b.
            stdin, stdout, stderr = ssh_client.exec_command("free -b")
            memory_output = stdout.read().decode().strip().split('\n')[1]
            memory_parts = memory_output.split()
            memory_total = int(memory_parts[1])
            memory_used = int(memory_parts[2])
            memory_free = int(memory_parts[3])
            memory_usage = (memory_used / memory_total) * 100

            metrics_data.update({
                'memory_total': memory_total,
                'memory_used': memory_used,
                'memory_free': memory_free,
                'memory_usage': memory_usage
            })

            # Root filesystem usage in bytes.
            stdin, stdout, stderr = ssh_client.exec_command("df -B1 / | tail -1")
            disk_output = stdout.read().decode().strip()
            disk_parts = disk_output.split()
            disk_total = int(disk_parts[1])
            disk_used = int(disk_parts[2])
            disk_free = int(disk_parts[3])
            disk_usage = (disk_used / disk_total) * 100

            metrics_data.update({
                'disk_total': disk_total,
                'disk_used': disk_used,
                'disk_free': disk_free,
                'disk_usage': disk_usage
            })

            # Network counters from the first common ethernet interface;
            # fall back to zeros when no matching interface exists.
            stdin, stdout, stderr = ssh_client.exec_command(
                "cat /proc/net/dev | grep -E '(eth0|ens|enp)' | head -1"
            )
            network_output = stdout.read().decode().strip()
            if network_output:
                network_parts = network_output.split()
                metrics_data.update({
                    'network_bytes_recv': int(network_parts[1]),
                    'network_packets_recv': int(network_parts[2]),
                    'network_bytes_sent': int(network_parts[9]),
                    'network_packets_sent': int(network_parts[10])
                })
            else:
                metrics_data.update({
                    'network_bytes_recv': 0,
                    'network_packets_recv': 0,
                    'network_bytes_sent': 0,
                    'network_packets_sent': 0
                })

            # Total process count (minus the ps header line).
            stdin, stdout, stderr = ssh_client.exec_command("ps aux | wc -l")
            process_count = int(stdout.read().decode().strip()) - 1

            # Zombie processes are reported with STAT 'Z' (and "<defunct>"
            # as the command), NOT the literal word "zombie" — the previous
            # `grep -c '[Zz]ombie'` always counted 0. Match the STAT column
            # instead; `c+0` guarantees numeric output even with no zombies.
            stdin, stdout, stderr = ssh_client.exec_command(
                "ps aux | awk 'NR>1 && $8 ~ /^Z/ {c++} END {print c+0}'"
            )
            zombie_count = int(stdout.read().decode().strip())

            metrics_data.update({
                'process_count': process_count,
                'zombie_process_count': zombie_count
            })

            # Uptime in seconds; derive the boot time from "now - uptime".
            stdin, stdout, stderr = ssh_client.exec_command("cat /proc/uptime")
            uptime_seconds = float(stdout.read().decode().strip().split()[0])
            boot_time = timezone.now() - timedelta(seconds=uptime_seconds)

            metrics_data.update({
                'uptime': int(uptime_seconds),
                'boot_time': boot_time
            })

            # Persist the full sample as one row.
            HostMetrics.objects.create(
                host=host,
                **metrics_data
            )

            # A successful collection doubles as a heartbeat.
            host.status = 'online'
            host.last_heartbeat = timezone.now()
            host.save(update_fields=['status', 'last_heartbeat'])

            logger.info(f"Successfully collected metrics for host {host.hostname}")

        except Exception as e:
            logger.error(f"Failed to collect metrics for host {host.hostname}: {str(e)}")
            # Any failure to reach or parse the host marks it offline.
            host.status = 'offline'
            host.save(update_fields=['status'])
            raise
        finally:
            ssh_client.close()

    except Host.DoesNotExist:
        logger.error(f"Host with id {host_id} not found or not active")
    except Exception as e:
        logger.error(f"Error collecting metrics for host {host_id}: {str(e)}")
        # Schedule a retry until the retry budget is exhausted.
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=60, exc=e)


@shared_task
def collect_all_host_metrics():
    """Queue an async metrics-collection task for every monitored host."""
    monitored = Host.objects.filter(is_active=True, monitor_enabled=True)

    for entry in monitored:
        collect_host_metrics.delay(entry.id)

    logger.info(f"Scheduled metrics collection for {monitored.count()} hosts")


@shared_task(bind=True, max_retries=3)
def collect_process_info(self, host_id):
    """Collect the top-10 CPU-consuming processes from a host over SSH.

    Parses ``ps aux`` output into ProcessInfo rows and prunes rows older
    than one hour for the same host.

    Args:
        host_id: Primary key of the cmdb Host to sample. The host must be
            active with monitoring enabled.

    Retries up to ``max_retries`` times with a 120s countdown on failure.
    """
    try:
        host = Host.objects.get(id=host_id, is_active=True, monitor_enabled=True)

        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            # Connect using password or private-key auth per host config.
            if host.ssh_auth_type == 'password':
                password = host.decrypt_password()
                ssh_client.connect(
                    hostname=host.ip,
                    port=host.ssh_port,
                    username=host.ssh_username,
                    password=password,
                    timeout=settings.SSH_CONNECTION_TIMEOUT
                )
            else:
                private_key = host.decrypt_private_key()
                if private_key:
                    from io import StringIO
                    key = paramiko.RSAKey.from_private_key(StringIO(private_key))
                    ssh_client.connect(
                        hostname=host.ip,
                        port=host.ssh_port,
                        username=host.ssh_username,
                        pkey=key,
                        timeout=settings.SSH_CONNECTION_TIMEOUT
                    )
                else:
                    # Previously this fell through with an unconnected client,
                    # surfacing later as an opaque SSHException from
                    # exec_command. Fail fast like collect_host_metrics does.
                    raise Exception("SSH私钥未配置")

            # Top 10 processes by CPU (skip ps header, take next 10 rows).
            stdin, stdout, stderr = ssh_client.exec_command(
                "ps aux --sort=-%cpu | head -11 | tail -10"
            )
            process_output = stdout.read().decode().strip()

            # Prune stale snapshots, keeping the last hour of records.
            ProcessInfo.objects.filter(
                host=host,
                collected_at__lt=timezone.now() - timedelta(hours=1)
            ).delete()

            # Parse each ps row: USER PID %CPU %MEM VSZ RSS TTY STAT START
            # TIME COMMAND... (COMMAND may span multiple fields).
            for line in process_output.split('\n'):
                if line.strip():
                    parts = line.split()
                    if len(parts) >= 11:
                        try:
                            ProcessInfo.objects.create(
                                host=host,
                                pid=int(parts[1]),
                                name=parts[10],
                                cmdline=' '.join(parts[10:]),
                                cpu_percent=float(parts[2]),
                                memory_percent=float(parts[3]),
                                memory_rss=int(parts[5]) * 1024,  # RSS: KB to bytes
                                memory_vms=int(parts[4]) * 1024,  # VSZ: KB to bytes
                                status=parts[7],
                                create_time=timezone.now()  # simplified: actual start time not parsed
                            )
                        except (ValueError, IndexError) as e:
                            logger.warning(f"Failed to parse process line: {line}, error: {e}")

            logger.info(f"Successfully collected process info for host {host.hostname}")

        except Exception as e:
            logger.error(f"Failed to collect process info for host {host.hostname}: {str(e)}")
            raise
        finally:
            ssh_client.close()

    except Host.DoesNotExist:
        logger.error(f"Host with id {host_id} not found or not active")
    except Exception as e:
        logger.error(f"Error collecting process info for host {host_id}: {str(e)}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=120, exc=e)


@shared_task
def collect_all_process_info():
    """Queue an async process-info collection task for every monitored host."""
    monitored = Host.objects.filter(is_active=True, monitor_enabled=True)

    for entry in monitored:
        collect_process_info.delay(entry.id)

    logger.info(f"Scheduled process info collection for {monitored.count()} hosts")


@shared_task(bind=True, max_retries=3)
def check_service_status(self, host_id, services=None):
    """Check the status of common services on a host via systemctl over SSH.

    Args:
        host_id: Primary key of the cmdb Host (must be active).
        services: Optional list of service unit names; defaults to a set of
            common daemons (nginx, mysql, docker, ...).

    Stores one ServiceStatus row per service. Retries up to ``max_retries``
    times with a 180s countdown on failure.
    """
    if services is None:
        services = ['nginx', 'apache2', 'mysql', 'postgresql', 'redis', 'docker']

    try:
        host = Host.objects.get(id=host_id, is_active=True)

        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            # Connect using password or private-key auth per host config.
            if host.ssh_auth_type == 'password':
                password = host.decrypt_password()
                ssh_client.connect(
                    hostname=host.ip,
                    port=host.ssh_port,
                    username=host.ssh_username,
                    password=password,
                    timeout=settings.SSH_CONNECTION_TIMEOUT
                )
            else:
                private_key = host.decrypt_private_key()
                if private_key:
                    from io import StringIO
                    key = paramiko.RSAKey.from_private_key(StringIO(private_key))
                    ssh_client.connect(
                        hostname=host.ip,
                        port=host.ssh_port,
                        username=host.ssh_username,
                        pkey=key,
                        timeout=settings.SSH_CONNECTION_TIMEOUT
                    )
                else:
                    # Previously this fell through with an unconnected client,
                    # surfacing later as an opaque SSHException. Fail fast
                    # like collect_host_metrics does.
                    raise Exception("SSH私钥未配置")

            # Probe each requested service.
            for service in services:
                try:
                    # systemctl is-active prints active/inactive/failed;
                    # fall back to 'unknown' when the unit does not exist.
                    stdin, stdout, stderr = ssh_client.exec_command(
                        f"systemctl is-active {service} 2>/dev/null || echo 'unknown'"
                    )
                    status_output = stdout.read().decode().strip()

                    # Main PID of the unit, if any.
                    stdin, stdout, stderr = ssh_client.exec_command(
                        f"systemctl show {service} --property=MainPID --value 2>/dev/null || echo '0'"
                    )
                    pid_output = stdout.read().decode().strip()
                    pid = int(pid_output) if pid_output.isdigit() else None
                    # systemd reports MainPID=0 when the unit has no main
                    # process; store NULL instead of a bogus PID 0.
                    if pid == 0:
                        pid = None

                    # Map systemctl states onto our status vocabulary.
                    status_map = {
                        'active': 'running',
                        'inactive': 'stopped',
                        'failed': 'failed',
                        'unknown': 'unknown'
                    }
                    status = status_map.get(status_output, 'unknown')

                    # Record the observation.
                    ServiceStatus.objects.create(
                        host=host,
                        service_name=service,
                        status=status,
                        pid=pid
                    )

                except Exception as e:
                    logger.warning(f"Failed to check service {service} on host {host.hostname}: {e}")
                    ServiceStatus.objects.create(
                        host=host,
                        service_name=service,
                        status='unknown'
                    )

            logger.info(f"Successfully checked services for host {host.hostname}")

        except Exception as e:
            logger.error(f"Failed to check services for host {host.hostname}: {str(e)}")
            raise
        finally:
            ssh_client.close()

    except Host.DoesNotExist:
        logger.error(f"Host with id {host_id} not found or not active")
    except Exception as e:
        logger.error(f"Error checking services for host {host_id}: {str(e)}")
        if self.request.retries < self.max_retries:
            raise self.retry(countdown=180, exc=e)


@shared_task
def check_all_service_status():
    """Queue an async service-status check for every monitored host."""
    monitored = Host.objects.filter(is_active=True, monitor_enabled=True)

    for entry in monitored:
        check_service_status.delay(entry.id)

    logger.info(f"Scheduled service status check for {monitored.count()} hosts")


@shared_task
def cleanup_old_metrics():
    """Purge monitoring records older than the configured retention window."""
    cutoff = timezone.now() - timedelta(days=settings.MONITORING_DATA_RETENTION_DAYS)

    # Each table shares the same collected_at cutoff; drive the three
    # deletions from one table of (model, log label) pairs.
    targets = (
        (HostMetrics, "host metrics"),
        (ProcessInfo, "process info"),
        (ServiceStatus, "service status"),
    )
    for model, label in targets:
        removed, _ = model.objects.filter(collected_at__lt=cutoff).delete()
        logger.info(f"Deleted {removed} old {label} records")


@shared_task
def check_alerts():
    """Evaluate active alert rules against recent metrics and log triggers."""
    # Dispatch table instead of an if/elif chain; unknown operators simply
    # never trigger, matching the previous behavior.
    comparators = {
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
    }

    for rule in AlertRule.objects.filter(is_active=True):
        try:
            # Metrics within the rule's evaluation window, newest first.
            window_start = timezone.now() - timedelta(seconds=rule.duration)
            recent = HostMetrics.objects.filter(
                collected_at__gte=window_start
            ).order_by('-collected_at')

            compare = comparators.get(rule.operator)
            for sample in recent:
                value = getattr(sample, rule.metric_type, None)
                if value is None:
                    continue
                if compare is not None and compare(value, rule.threshold):
                    logger.warning(
                        f"Alert triggered: {rule.name} on host {sample.host.hostname}, "
                        f"current value: {value}, threshold: {rule.threshold}"
                    )
                    # Notification dispatch can be hooked in here.

        except Exception as e:
            logger.error(f"Error checking alert rule {rule.name}: {str(e)}")


@shared_task
def update_host_status():
    """Mark hosts offline when no heartbeat arrived in the last 5 minutes."""
    cutoff = timezone.now() - timedelta(minutes=5)

    stale = Host.objects.filter(
        is_active=True,
        status='online',
        last_heartbeat__lt=cutoff,
    )

    changed = stale.update(status='offline')
    if changed:
        logger.info(f"Marked {changed} hosts as offline")


@shared_task
def test_host_connectivity(host_id):
    """Check whether a host answers ping and accepts an SSH login.

    Args:
        host_id: Primary key of the cmdb Host (must be active).

    Returns:
        A dict with 'hostname', 'ip', 'ping_result', 'ssh_result' and
        'overall_result' booleans, or a dict with an 'error' key when the
        host is missing or an unexpected error occurs.
    """
    try:
        host = Host.objects.get(id=host_id, is_active=True)

        # ICMP reachability: one ping with a 3s per-packet deadline
        # (Linux ping flags).
        ping_result = False
        try:
            import subprocess
            result = subprocess.run(
                ['ping', '-c', '1', '-W', '3', host.ip],
                capture_output=True,
                timeout=10
            )
            ping_result = result.returncode == 0
        except Exception as e:
            logger.warning(f"Ping test failed for {host.hostname}: {e}")

        # SSH login test with a short timeout.
        ssh_result = False
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            if host.ssh_auth_type == 'password':
                password = host.decrypt_password()
                ssh_client.connect(
                    hostname=host.ip,
                    port=host.ssh_port,
                    username=host.ssh_username,
                    password=password,
                    timeout=5
                )
            else:
                private_key = host.decrypt_private_key()
                if not private_key:
                    # Previously a missing key fell through and reported
                    # ssh_result=True without ever connecting; treat it as
                    # a failure instead.
                    raise Exception("SSH私钥未配置")
                from io import StringIO
                key = paramiko.RSAKey.from_private_key(StringIO(private_key))
                ssh_client.connect(
                    hostname=host.ip,
                    port=host.ssh_port,
                    username=host.ssh_username,
                    pkey=key,
                    timeout=5
                )

            ssh_result = True

        except Exception as e:
            logger.warning(f"SSH test failed for {host.hostname}: {e}")
        finally:
            # Close even when connect raised partway (the original leaked
            # the transport on failure).
            ssh_client.close()

        return {
            'host_id': host_id,
            'hostname': host.hostname,
            'ip': host.ip,
            'ping_result': ping_result,
            'ssh_result': ssh_result,
            'overall_result': ping_result and ssh_result
        }

    except Host.DoesNotExist:
        return {
            'host_id': host_id,
            'error': 'Host not found'
        }
    except Exception as e:
        return {
            'host_id': host_id,
            'error': str(e)
        }
