# enhanced_ops.py
import os
import psutil
import platform
import datetime
import re
import shutil
import logging
import json
import subprocess
from pathlib import Path
from typing import List, Dict, Optional, Tuple
import threading
import time


class EnhancedCloudOps:
    """Enhanced cloud-computing operations toolkit.

    Bundles host monitoring (CPU/memory/disk/network), service checks,
    log analysis, and backup management behind one class.
    """

    def __init__(self, app=None):
        """Prepare working directories and logging.

        Args:
            app: Optional host application object; only its ``config``
                attribute is consulted (for database-backup settings).
        """
        self.app = app
        self.os_info = self.get_os_info()   # cached platform snapshot
        self.backup_dir = "backups"         # backup sets are written here
        self.log_dir = "logs"               # ops log files are written here
        self.monitoring_enabled = False     # background-monitor run flag
        self.monitoring_thread = None       # worker thread handle

        # Make sure both working directories exist before any file I/O.
        for directory in (self.backup_dir, self.log_dir):
            os.makedirs(directory, exist_ok=True)

        self.setup_logging()

    def setup_logging(self):
        """Configure the logging system.

        Installs a date-stamped file handler (under ``self.log_dir``) plus a
        stream handler via the root logger, then grabs a named logger for
        this class.

        NOTE(review): ``logging.basicConfig`` is a no-op once the root logger
        already has handlers, so a second instance created later in the same
        process keeps logging to the first instance's file — confirm this is
        acceptable.
        """
        # One log file per calendar day.
        log_file = os.path.join(self.log_dir, f"ops_{datetime.datetime.now().strftime('%Y%m%d')}.log")

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8'),
                logging.StreamHandler()
            ]
        )
        # Dedicated logger used by every other method of this class.
        self.logger = logging.getLogger('EnhancedCloudOps')

    def get_os_info(self):
        """获取操作系统基本信息"""
        try:
            return {
                'system': platform.system(),
                'release': platform.release(),
                'version': platform.version(),
                'hostname': platform.node(),
                'processor': platform.processor(),
                'architecture': platform.machine(),
                'platform': platform.platform()
            }
        except Exception as e:
            return {'error': f'获取系统信息失败: {str(e)}'}

    def get_cpu_usage(self):
        """Sample CPU utilisation.

        Takes one 1-second per-CPU sample and derives the overall usage as
        its mean, plus one 1-second sample for the user/system/idle time
        shares. (The original made three separate blocking 1-second calls,
        ~3 s total latency, to produce the same data.)

        Returns:
            dict with overall/per-core usage percentages, core counts and
            time shares, or ``{'error': ...}`` on failure.
        """
        try:
            # One blocking sample gives every core; the mean is the overall usage.
            per_cpu = psutil.cpu_percent(interval=1, percpu=True)
            cpu_percent = round(sum(per_cpu) / len(per_cpu), 1) if per_cpu else 0.0
            cpu_times = psutil.cpu_times_percent(interval=1)

            return {
                'usage_percent': cpu_percent,
                'physical_cores': psutil.cpu_count(logical=False),
                'logical_cores': psutil.cpu_count(logical=True),
                'per_cpu_usage': per_cpu,
                'user_time': getattr(cpu_times, 'user', 0),
                'system_time': getattr(cpu_times, 'system', 0),
                'idle_time': getattr(cpu_times, 'idle', 0)
            }
        except Exception as e:
            self.logger.error(f"获取CPU使用率失败: {str(e)}")
            return {'error': str(e)}

    def get_memory_usage(self):
        """Report RAM and swap utilisation in GB and percent.

        Returns:
            dict of rounded GB figures plus percentages, or ``{'error': ...}``.
        """
        try:
            gb = 1024 ** 3
            ram = psutil.virtual_memory()
            swp = psutil.swap_memory()

            return {
                'total_gb': round(ram.total / gb, 2),
                'used_gb': round(ram.used / gb, 2),
                'available_gb': round(ram.available / gb, 2),
                'usage_percent': ram.percent,
                'swap_total_gb': round(swp.total / gb, 2),
                'swap_used_gb': round(swp.used / gb, 2),
                'swap_usage_percent': swp.percent
            }
        except Exception as e:
            self.logger.error(f"获取内存使用情况失败: {str(e)}")
            return {'error': str(e)}

    def get_disk_usage(self, path='/'):
        """Report disk usage for *path*, plus global disk I/O counters.

        Args:
            path: Mount point / directory to measure (default root).

        Returns:
            dict with capacity figures (and I/O counters when available),
            or ``{'error': ...}`` on failure.
        """
        try:
            usage = psutil.disk_usage(path)
            gb, mb = 1024 ** 3, 1024 ** 2

            stats = {
                'path': path,
                'total_gb': round(usage.total / gb, 2),
                'used_gb': round(usage.used / gb, 2),
                'free_gb': round(usage.free / gb, 2),
                'usage_percent': usage.percent
            }

            # I/O counters can be unavailable (e.g. some virtualised hosts).
            counters = psutil.disk_io_counters()
            if counters:
                stats['read_mb'] = round(counters.read_bytes / mb, 2)
                stats['write_mb'] = round(counters.write_bytes / mb, 2)
                stats['read_count'] = counters.read_count
                stats['write_count'] = counters.write_count

            return stats
        except Exception as e:
            self.logger.error(f"获取磁盘使用情况失败: {str(e)}")
            return {'error': str(e)}

    def get_network_info(self):
        """Report cumulative network I/O totals and the open-connection count.

        Returns:
            dict of MB totals, packet counts and connection count,
            or ``{'error': ...}`` on failure.
        """
        try:
            mb = 1024 ** 2
            io = psutil.net_io_counters()
            # NOTE(review): net_connections() may need elevated privileges on
            # some platforms; failures fall through to the error branch.
            connections = psutil.net_connections()

            return {
                'bytes_sent_mb': round(io.bytes_sent / mb, 2),
                'bytes_recv_mb': round(io.bytes_recv / mb, 2),
                'packets_sent': io.packets_sent,
                'packets_recv': io.packets_recv,
                'active_connections': len(connections)
            }
        except Exception as e:
            self.logger.error(f"获取网络信息失败: {str(e)}")
            return {'error': str(e)}

    def get_process_info(self, top_n=10):
        """Return the *top_n* processes by CPU usage.

        Args:
            top_n: Maximum number of process records to return.

        Returns:
            list[dict]: pid/name/cpu_percent/memory_percent/status per
            process, sorted by CPU usage descending, or ``{'error': ...}``.
        """
        try:
            processes = []
            for proc in psutil.process_iter(['pid', 'name', 'cpu_percent', 'memory_percent', 'status']):
                try:
                    processes.append(proc.info)
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    # Process vanished or is off-limits; skip it.
                    pass

            # psutil can report cpu_percent as None on the first sample;
            # coerce to 0 so the sort never compares None against a float
            # (the original `x.get('cpu_percent', 0)` still yielded None).
            processes.sort(key=lambda p: p.get('cpu_percent') or 0, reverse=True)
            return processes[:top_n]
        except Exception as e:
            self.logger.error(f"获取进程信息失败: {str(e)}")
            return {'error': str(e)}

    def check_service_status(self, service_name):
        """Check whether a service is running (Linux only).

        Tries ``systemctl is-active`` first, then falls back to scanning
        ``ps aux`` output for the service name.

        Args:
            service_name: Name of the service / process to look for.

        Returns:
            dict with service_name/status/description, or ``{'error': ...}``.
        """
        if self.os_info.get('system') != 'Linux':
            return {'error': f"服务检查仅支持Linux系统，当前系统: {self.os_info.get('system', 'Unknown')}"}

        try:
            # Preferred path: ask systemd directly.
            result = subprocess.run(
                ['systemctl', 'is-active', service_name],
                capture_output=True, text=True, timeout=10
            )

            status = result.stdout.strip()
            if status in ['active', 'inactive', 'failed']:
                return {
                    'service_name': service_name,
                    'status': status,
                    'description': {
                        'active': '运行中',
                        'inactive': '未运行',
                        'failed': '启动失败'
                    }.get(status, '未知状态')
                }

            # Fallback: substring search in the process list. Timeout added
            # for consistency with the systemctl call above, so a hung `ps`
            # cannot block the caller indefinitely. NOTE: substring matching
            # can false-positive on any command line containing the name.
            result = subprocess.run(
                ['ps', 'aux'], capture_output=True, text=True, timeout=10
            )
            if service_name in result.stdout:
                return {
                    'service_name': service_name,
                    'status': 'running',
                    'description': '运行中'
                }
            else:
                return {
                    'service_name': service_name,
                    'status': 'stopped',
                    'description': '未运行'
                }

        except subprocess.TimeoutExpired:
            return {'error': f"检查服务超时: {service_name}"}
        except Exception as e:
            return {'error': f"检查服务时出错: {str(e)}"}

    def analyze_logs(self, log_file, error_keywords=None, lines_to_analyze=1000):
        """分析日志文件"""
        if error_keywords is None:
            error_keywords = ['error', 'fail', 'warning', 'exception', 'critical']

        if not os.path.exists(log_file):
            return {'error': f"日志文件不存在: {log_file}"}

        try:
            errors = []
            with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
                lines = f.readlines()[-lines_to_analyze:]  # 只分析最后N行

                for line_num, line in enumerate(lines, 1):
                    line_lower = line.lower()
                    for keyword in error_keywords:
                        if keyword in line_lower:
                            errors.append({
                                'line_number': line_num,
                                'keyword': keyword,
                                'content': line.strip()[:200],  # 限制内容长度
                                'timestamp': line.split()[0] if line.split() else 'Unknown'
                            })
                            break

            # 统计错误类型
            error_stats = {}
            for error in errors:
                keyword = error['keyword']
                error_stats[keyword] = error_stats.get(keyword, 0) + 1

            return {
                'log_file': log_file,
                'total_errors': len(errors),
                'error_statistics': error_stats,
                'recent_errors': errors[:20]  # 只返回最近20条错误
            }

        except Exception as e:
            return {'error': f"分析日志时出错: {str(e)}"}

    def create_backup(self, backup_type='full', include_logs=True):
        """Create a backup set (database dump, config files, optional logs).

        Args:
            backup_type: Label embedded in the backup directory name.
            include_logs: Whether to copy log files into the backup.

        Returns:
            dict describing the backup set, or ``{'error': ...}`` on failure.
        """
        try:
            now = datetime.datetime.now()
            backup_name = f"backup_{backup_type}_{now.strftime('%Y%m%d_%H%M%S')}"
            backup_path = os.path.join(self.backup_dir, backup_name)
            os.makedirs(backup_path, exist_ok=True)

            collected = []

            # Database dump first, when the host app exposes DB settings.
            if self.app and hasattr(self.app, 'config'):
                db_config = self.app.config.get('DATABASE_CONFIG', {})
                if db_config:
                    db_file = self.backup_database(db_config, backup_path)
                    if db_file:
                        collected.append(db_file)

            # Then configuration files, then (optionally) log files.
            collected.extend(self.backup_config_files(backup_path))
            if include_logs:
                collected.extend(self.backup_log_files(backup_path))

            total_size = sum(entry.get('size_bytes', 0) for entry in collected)

            backup_info = {
                'backup_name': backup_name,
                'backup_path': backup_path,
                'backup_type': backup_type,
                'created_at': now.isoformat(),
                'size_bytes': total_size,
                'files': collected,
                'size_human': self.format_size(total_size)
            }

            # Persist the manifest alongside the backed-up files.
            with open(os.path.join(backup_path, 'backup_info.json'), 'w', encoding='utf-8') as f:
                json.dump(backup_info, f, indent=2, ensure_ascii=False)

            self.logger.info(f"备份创建成功: {backup_name}, 大小: {backup_info['size_human']}")
            return backup_info

        except Exception as e:
            self.logger.error(f"创建备份失败: {str(e)}")
            return {'error': f"创建备份失败: {str(e)}"}

    def backup_database(self, db_config, backup_path):
        """Dump a MySQL database to ``database_backup.sql`` under *backup_path*.

        Args:
            db_config: Keyword arguments for ``mysql.connector.connect``.
            backup_path: Directory the SQL dump file is written into.

        Returns:
            dict describing the dump file (path, size, table count), or
            ``None`` on any failure (errors are logged, not raised).

        NOTE(review): the table name is interpolated unquoted into the
        ``SHOW CREATE TABLE`` statement below — fine for names returned by
        ``SHOW TABLES``, but worth hardening with backtick quoting.
        """
        try:
            import mysql.connector

            conn = mysql.connector.connect(**db_config)
            cursor = conn.cursor(dictionary=True)

            # List every table in the target database.
            cursor.execute("SHOW TABLES")
            tables = [list(table.values())[0] for table in cursor.fetchall()]

            backup_content = []
            backup_content.append("-- Database Backup")
            backup_content.append(f"-- Created: {datetime.datetime.now()}")
            backup_content.append(f"-- Database: {db_config.get('database', 'Unknown')}")
            backup_content.append("")

            for table in tables:
                # Emit the DDL for the table (the dict cursor keys this
                # result by the literal column name 'Create Table').
                cursor.execute(f"SHOW CREATE TABLE {table}")
                create_table = cursor.fetchone()[f'Create Table']
                backup_content.append(f"DROP TABLE IF EXISTS `{table}`;")
                backup_content.append(f"{create_table};")
                backup_content.append("")

                # Emit one INSERT statement per row of table data.
                cursor.execute(f"SELECT * FROM `{table}`")
                rows = cursor.fetchall()
                if rows:
                    columns = list(rows[0].keys())
                    columns_str = ', '.join([f"`{col}`" for col in columns])

                    for row in rows:
                        values = []
                        for col in columns:
                            value = row[col]
                            if value is None:
                                values.append('NULL')
                            elif isinstance(value, (int, float)):
                                values.append(str(value))
                            else:
                                # Escape single quotes for SQL string literals.
                                value_str = str(value).replace("'", "''")
                                values.append(f"'{value_str}'")

                        values_str = ', '.join(values)
                        backup_content.append(f"INSERT INTO `{table}` ({columns_str}) VALUES ({values_str});")

                    backup_content.append("")

            conn.close()

            # Write the assembled dump to disk.
            db_backup_file = os.path.join(backup_path, 'database_backup.sql')
            with open(db_backup_file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(backup_content))

            file_size = os.path.getsize(db_backup_file)

            return {
                'file_type': 'database',
                'file_path': db_backup_file,
                'size_bytes': file_size,
                'table_count': len(tables)
            }

        except Exception as e:
            self.logger.error(f"数据库备份失败: {str(e)}")
            return None

    def backup_config_files(self, backup_path):
        """Copy config-like files under the CWD into ``<backup_path>/configs``.

        Files are matched by extension (.py/.json/.yaml/.yml/.ini/.cfg);
        individual copy failures are logged as warnings and skipped.

        Returns:
            list[dict]: one record per file successfully copied.
        """
        copied = []
        wanted = ('.py', '.json', '.yaml', '.yml', '.ini', '.cfg')

        try:
            for candidate in Path('.').rglob('*'):
                if not (candidate.is_file() and candidate.suffix.lower() in wanted):
                    continue
                try:
                    target = os.path.join(backup_path, 'configs', str(candidate.relative_to('.')))
                    os.makedirs(os.path.dirname(target), exist_ok=True)
                    shutil.copy2(candidate, target)

                    copied.append({
                        'file_type': 'config',
                        'file_path': target,
                        'original_path': str(candidate),
                        'size_bytes': os.path.getsize(target)
                    })
                except Exception as e:
                    self.logger.warning(f"备份配置文件失败 {candidate}: {str(e)}")

            return copied

        except Exception as e:
            self.logger.error(f"备份配置文件失败: {str(e)}")
            return []

    def backup_log_files(self, backup_path):
        """备份日志文件"""
        log_files = []

        try:
            if os.path.exists(self.log_dir):
                for log_file in os.listdir(self.log_dir):
                    if log_file.endswith('.log'):
                        source_path = os.path.join(self.log_dir, log_file)
                        dest_path = os.path.join(backup_path, 'logs', log_file)
                        os.makedirs(os.path.dirname(dest_path), exist_ok=True)

                        shutil.copy2(source_path, dest_path)

                        log_files.append({
                            'file_type': 'log',
                            'file_path': dest_path,
                            'size_bytes': os.path.getsize(dest_path)
                        })

            return log_files

        except Exception as e:
            self.logger.error(f"备份日志文件失败: {str(e)}")
            return []

    def get_backup_list(self, limit=20):
        """获取备份列表(优化版 - 限制返回数量)
        
        Args:
            limit: 返回的最大备份数量,默认20个
        """
        try:
            backups = []
            if not os.path.exists(self.backup_dir):
                self.logger.warning(f"备份目录不存在: {self.backup_dir}")
                return backups

            # 只处理目录,并按修改时间排序(避免读取所有文件)
            backup_dirs = []
            for name in os.listdir(self.backup_dir):
                item_path = os.path.join(self.backup_dir, name)
                if os.path.isdir(item_path) and name.startswith('backup_'):
                    mtime = os.path.getmtime(item_path)
                    backup_dirs.append((name, mtime))
            
            # 按时间倒序排序
            backup_dirs.sort(key=lambda x: x[1], reverse=True)
            
            # 只读取前limit个备份的信息
            for backup_name, mtime in backup_dirs[:limit]:
                backup_path = os.path.join(self.backup_dir, backup_name)
                info_file = os.path.join(backup_path, 'backup_info.json')

                if os.path.exists(info_file):
                    try:
                        with open(info_file, 'r', encoding='utf-8') as f:
                            backup_info = json.load(f)
                        backups.append(backup_info)
                        self.logger.debug(f"成功读取备份信息: {backup_name}")
                    except Exception as e:
                        # 如果info文件不存在或损坏,创建基本信息
                        self.logger.warning(f"读取备份信息失败 {backup_name}: {str(e)}")
                        backups.append({
                            'backup_name': backup_name,
                            'backup_path': backup_path,
                            'created_at': datetime.datetime.fromtimestamp(mtime).isoformat(),
                            'size_human': 'Unknown'
                        })
                else:
                    # 没有info文件,生成基本信息
                    self.logger.warning(f"备份缺少info文件: {backup_name}")
                    backups.append({
                        'backup_name': backup_name,
                        'backup_path': backup_path,
                        'created_at': datetime.datetime.fromtimestamp(mtime).isoformat(),
                        'size_human': 'Unknown'
                    })
            
            self.logger.info(f"成功获取 {len(backups)} 个备份信息")
            return backups

        except Exception as e:
            self.logger.error(f"获取备份列表失败: {str(e)}")
            return []

    def restore_backup(self, backup_name):
        """恢复备份"""
        try:
            backup_path = os.path.join(self.backup_dir, backup_name)
            info_file = os.path.join(backup_path, 'backup_info.json')

            if not os.path.exists(info_file):
                return {'error': f"备份信息文件不存在: {backup_name}"}

            with open(info_file, 'r', encoding='utf-8') as f:
                backup_info = json.load(f)

            # 这里实现恢复逻辑
            # 注意：恢复操作需要谨慎，这里只返回备份信息
            self.logger.info(f"开始恢复备份: {backup_name}")

            return {
                'success': True,
                'message': f"备份恢复流程已启动: {backup_name}",
                'backup_info': backup_info
            }

        except Exception as e:
            self.logger.error(f"恢复备份失败: {str(e)}")
            return {'error': f"恢复备份失败: {str(e)}"}

    def delete_backup(self, backup_name):
        """删除备份"""
        try:
            backup_path = os.path.join(self.backup_dir, backup_name)

            if not os.path.exists(backup_path):
                return {'error': f"备份不存在: {backup_name}"}

            # 安全检查
            if not backup_name.startswith('backup_'):
                return {'error': '无效的备份名称'}

            shutil.rmtree(backup_path)
            self.logger.info(f"备份已删除: {backup_name}")

            return {'success': True, 'message': f"备份已删除: {backup_name}"}

        except Exception as e:
            self.logger.error(f"删除备份失败: {str(e)}")
            return {'error': f"删除备份失败: {str(e)}"}

    def get_system_health(self):
        """获取系统健康状态"""
        try:
            cpu_info = self.get_cpu_usage()
            memory_info = self.get_memory_usage()
            disk_info = self.get_disk_usage()
            network_info = self.get_network_info()

            # 评估系统健康状态
            health_score = 100
            warnings = []

            # CPU健康检查
            if cpu_info.get('usage_percent', 0) > 80:
                health_score -= 10
                warnings.append("CPU使用率过高")

            # 内存健康检查
            if memory_info.get('usage_percent', 0) > 85:
                health_score -= 15
                warnings.append("内存使用率过高")

            # 磁盘健康检查
            if disk_info.get('usage_percent', 0) > 90:
                health_score -= 20
                warnings.append("磁盘空间不足")

            # 确定健康状态
            if health_score >= 90:
                status = "健康"
                status_color = "green"
            elif health_score >= 70:
                status = "一般"
                status_color = "yellow"
            else:
                status = "警告"
                status_color = "red"

            return {
                'status': status,
                'status_color': status_color,
                'health_score': health_score,
                'warnings': warnings,
                'check_time': datetime.datetime.now().isoformat(),
                'cpu': cpu_info,
                'memory': memory_info,
                'disk': disk_info,
                'network': network_info,
                'os_info': self.os_info
            }

        except Exception as e:
            self.logger.error(f"获取系统健康状态失败: {str(e)}")
            return {'error': str(e)}

    def start_monitoring(self, interval=60):
        """Launch the background monitoring thread.

        Args:
            interval: Seconds between health snapshots.

        Returns:
            dict: success message, or a warning if already running.
        """
        if self.monitoring_enabled:
            return {'warning': '监控已经在运行中'}

        self.monitoring_enabled = True
        worker = threading.Thread(
            target=self._monitoring_worker,
            args=(interval,),
            daemon=True,  # never keep the process alive just for monitoring
        )
        self.monitoring_thread = worker
        worker.start()

        return {'success': True, 'message': f'系统监控已启动，间隔: {interval}秒'}

    def stop_monitoring(self):
        """Signal the monitoring thread to stop and wait briefly for it.

        Returns:
            dict: success message (always; the join is best-effort).
        """
        self.monitoring_enabled = False
        thread = self.monitoring_thread
        if thread:
            # Bounded join: the worker sleeps up to `interval` seconds per
            # cycle, so never block the caller indefinitely.
            thread.join(timeout=5)

        return {'success': True, 'message': '系统监控已停止'}

    def _monitoring_worker(self, interval):
        """Background loop: snapshot system health every *interval* seconds.

        Appends each snapshot to a per-day JSON file under ``self.log_dir``,
        keeping at most the latest 1000 entries. Runs until
        ``self.monitoring_enabled`` is cleared.

        Args:
            interval: Seconds to sleep between snapshots.
        """
        while self.monitoring_enabled:
            try:
                snapshot = {
                    'timestamp': datetime.datetime.now().isoformat(),
                    'health_status': self.get_system_health()
                }

                monitor_file = os.path.join(
                    self.log_dir,
                    f"monitor_{datetime.datetime.now().strftime('%Y%m%d')}.json"
                )

                # Load today's history; start fresh if the file is unreadable,
                # corrupt, or not the expected list shape. (Previously a bare
                # `except:` which also swallowed KeyboardInterrupt/SystemExit,
                # and a non-list JSON payload would crash on .append below.)
                monitor_data = []
                if os.path.exists(monitor_file):
                    try:
                        with open(monitor_file, 'r', encoding='utf-8') as f:
                            loaded = json.load(f)
                        if isinstance(loaded, list):
                            monitor_data = loaded
                    except (OSError, ValueError):
                        monitor_data = []

                monitor_data.append(snapshot)

                # Cap history so the file cannot grow without bound.
                if len(monitor_data) > 1000:
                    monitor_data = monitor_data[-1000:]

                with open(monitor_file, 'w', encoding='utf-8') as f:
                    json.dump(monitor_data, f, indent=2, ensure_ascii=False)

                time.sleep(interval)

            except Exception as e:
                self.logger.error(f"监控工作线程出错: {str(e)}")
                time.sleep(interval)

    def format_size(self, size_bytes):
        """Render a byte count as a human-readable string (B through PB)."""
        remaining = size_bytes
        for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
            if remaining < 1024.0:
                return f"{remaining:.2f} {unit}"
            remaining /= 1024.0
        # Anything still >= 1024 TB lands in petabytes.
        return f"{remaining:.2f} PB"

    def cleanup_old_backups(self, keep_count=10):
        """清理旧备份"""
        try:
            backups = self.get_backup_list()
            if len(backups) <= keep_count:
                return {'message': f'当前备份数量({len(backups)})未超过保留数量({keep_count})'}

            # 按创建时间排序，删除最旧的备份
            backups.sort(key=lambda x: x.get('created_at', ''))
            deleted_count = 0

            for i in range(len(backups) - keep_count):
                backup_name = backups[i]['backup_name']
                result = self.delete_backup(backup_name)
                if 'success' in result:
                    deleted_count += 1

            return {
                'success': True,
                'message': f'已清理 {deleted_count} 个旧备份',
                'deleted_count': deleted_count
            }

        except Exception as e:
            self.logger.error(f"清理旧备份失败: {str(e)}")
            return {'error': f"清理旧备份失败: {str(e)}"}