#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# config_optimizer.py - 配置优化检测模块

import json
import os
import re
import subprocess
import time
from datetime import datetime
from typing import Dict, List, Tuple

import numpy as np
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.models import Sequential


class ConfigOptimizer:
    """Evaluate an Apache httpd configuration against best-practice values.

    Loads the configuration file into a ``{lowercased directive: value}``
    mapping, compares each tracked directive against a reference table of
    optimal values, and can sample basic system metrics (CPU, memory,
    connection count, request latency) to flag likely performance
    bottlenecks.
    """

    def __init__(self, config_path: str = "/etc/httpd/conf/httpd.conf"):
        """Initialize the optimizer and load the config at *config_path*.

        A missing file is not an error: ``config_data`` is simply empty and
        every tracked directive will be reported as unconfigured.
        """
        self.config_path = config_path
        self.config_data = self._load_config()
        # Filled lazily by collect_performance_metrics().
        self.performance_metrics: List[Dict] = []
        self.optimal_configs = self._get_optimal_configs()

    def _load_config(self) -> Dict:
        """Parse the httpd config into a ``{lowercased directive: value}`` dict.

        Blank lines and ``#`` comments are skipped. Only the first
        whitespace run splits directive from value, so multi-token values
        are preserved verbatim.
        """
        config: Dict[str, str] = {}
        if not os.path.exists(self.config_path):
            return config

        with open(self.config_path, 'r', encoding='utf-8', errors='replace') as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith('#'):
                    # "Directive value..." -> split on the first whitespace run.
                    parts = line.split(maxsplit=1)
                    if len(parts) == 2:
                        key, value = parts
                        # Directive names are case-insensitive in Apache;
                        # normalise so lookups in evaluate_config() match.
                        config[key.lower()] = value
        return config

    def _get_optimal_configs(self) -> Dict:
        """Return the best-practice reference table for tracked directives.

        Keys MUST be all-lowercase to match the normalisation applied by
        _load_config(). ``range`` entries mark numeric directives; the rest
        are compared case-insensitively as option/boolean values.
        ``score_weight`` values sum to the total used for the 0-100 score.
        """
        return {
            "keepalive": {
                "optimal": "On",
                "description": "启用长连接减少握手开销",
                "score_weight": 0.1
            },
            "keepalivetimeout": {
                "optimal": "5",
                "range": (2, 10),
                "description": "长连接超时时间，过短增加握手次数，过长占用连接",
                "score_weight": 0.08
            },
            "maxkeepaliverequests": {
                "optimal": "100",
                "range": (50, 500),
                "description": "单个长连接最大请求数",
                "score_weight": 0.08
            },
            "startservers": {
                "optimal": "8",
                "range": (4, 16),
                "description": "初始启动的子进程数",
                "score_weight": 0.1
            },
            "minspareservers": {
                "optimal": "5",
                "range": (2, 10),
                "description": "最小空闲子进程数",
                "score_weight": 0.08
            },
            "maxspareservers": {
                "optimal": "20",
                "range": (10, 30),
                "description": "最大空闲子进程数",
                "score_weight": 0.08
            },
            "maxrequestworkers": {
                "optimal": "256",
                "range": (150, 512),
                "description": "最大并发请求处理数",
                "score_weight": 0.15
            },
            "timeout": {
                "optimal": "60",
                "range": (30, 120),
                "description": "请求超时时间",
                "score_weight": 0.07
            },
            "enablemmap": {
                "optimal": "Off",
                "description": "禁用内存映射提高稳定性",
                "score_weight": 0.06
            },
            # Was "enableSendfile": mixed case could never match the
            # lowercased keys produced by _load_config().
            "enablesendfile": {
                "optimal": "On",
                "description": "启用Sendfile提升静态文件性能",
                "score_weight": 0.06
            },
            "loglevel": {
                "optimal": "warn",
                "description": "生产环境降低日志级别减少IO开销",
                "score_weight": 0.06
            }
        }

    def collect_performance_metrics(self, samples: int = 5) -> List[Dict]:
        """Sample system/web metrics *samples* times, 2 seconds apart.

        Each sample records CPU%, memory%, the current number of port-80
        connections and the wall-clock time of one request against
        ``http://localhost``. A failing sample is skipped with a message
        rather than aborting the whole collection. Results are stored on
        ``self.performance_metrics`` and also returned.
        """
        metrics: List[Dict] = []
        for i in range(samples):
            if i:
                # Space samples 2 s apart; no trailing sleep after the last one.
                time.sleep(2)
            try:
                # Overall CPU utilisation (user + system) from top.
                cpu = subprocess.getoutput(
                    "top -b -n 1 | grep '^%Cpu' | awk '{print $2 + $4}'"
                )
                # Memory utilisation as used/total * 100.
                mem = subprocess.getoutput(
                    "free | grep Mem | awk '{print $3/$2 * 100.0}'"
                )
                # Current connection count on port 80.
                connections = subprocess.getoutput(
                    "netstat -an | grep :80 | wc -l"
                )
                # Total time of one local HTTP request; "0" if curl fails.
                req_time = subprocess.getoutput(
                    "curl -s -w '%{time_total}' -o /dev/null http://localhost || echo 0"
                )

                metrics.append({
                    "timestamp": datetime.now().isoformat(),
                    "cpu_usage": float(cpu) if cpu else 0,
                    "mem_usage": float(mem) if mem else 0,
                    "connections": int(connections) if connections else 0,
                    "avg_request_time": float(req_time) if req_time else 0
                })
            except Exception as e:
                # Shell tools may be missing or emit non-numeric output;
                # drop this sample but keep collecting the rest.
                print(f"收集性能指标失败: {str(e)}")

        self.performance_metrics = metrics
        return metrics

    def build_lstm_model(self, input_shape: Tuple) -> "Sequential":
        """Build a two-layer LSTM regressor for performance prediction.

        Args:
            input_shape: (timesteps, features) shape of each input sequence.

        Returns:
            A compiled Keras model (Adam optimizer, MSE loss) producing one
            linear output per sequence.
        """
        model = Sequential([
            LSTM(64, input_shape=input_shape, return_sequences=True),
            Dropout(0.2),
            LSTM(32),
            Dropout(0.2),
            Dense(16, activation='relu'),
            Dense(1, activation='linear')
        ])
        model.compile(optimizer='adam', loss='mse')
        return model

    def detect_performance_bottlenecks(self) -> Dict:
        """Flag CPU/memory/connection/latency bottlenecks from sampled metrics.

        Collects metrics first if none are present. Returns a dict with the
        list of detected ``bottlenecks`` and the raw ``performance_metrics``.
        """
        if not self.performance_metrics:
            self.collect_performance_metrics()
        if not self.performance_metrics:
            # Every sample failed: nothing to analyse (and max()/np.mean()
            # would blow up on empty sequences).
            return {"bottlenecks": [], "performance_metrics": []}

        # Simple threshold-rule detection.
        bottlenecks: List[Dict] = []

        # CPU bottleneck: sustained load above 80%.
        avg_cpu = np.mean([m["cpu_usage"] for m in self.performance_metrics])
        if avg_cpu > 80:
            bottlenecks.append({
                "type": "cpu",
                "metric": f"平均CPU使用率 {avg_cpu:.2f}%",
                "threshold": "80%",
                "risk_level": "high",
                "description": "CPU使用率过高，可能导致请求处理延迟"
            })

        # Memory bottleneck: sustained usage above 85%.
        avg_mem = np.mean([m["mem_usage"] for m in self.performance_metrics])
        if avg_mem > 85:
            bottlenecks.append({
                "type": "memory",
                "metric": f"平均内存使用率 {avg_mem:.2f}%",
                "threshold": "85%",
                "risk_level": "high",
                "description": "内存使用率过高，可能导致swap使用增加"
            })

        # Connection bottleneck: peak connections near the worker limit.
        max_connections = max(m["connections"] for m in self.performance_metrics)
        try:
            max_workers = int(self.config_data.get("maxrequestworkers", 256))
        except ValueError:
            # Non-numeric directive value in the config; fall back to the
            # same default used when the directive is absent.
            max_workers = 256
        if max_connections > 0.8 * max_workers:
            bottlenecks.append({
                "type": "connections",
                "metric": f"最大连接数 {max_connections}/{max_workers}",
                "threshold": f"{0.8 * max_workers}",
                "risk_level": "medium",
                "description": "连接数接近最大工作进程限制，可能导致连接排队"
            })

        # Latency bottleneck: average request time above 2 seconds.
        avg_req_time = np.mean([m["avg_request_time"] for m in self.performance_metrics])
        if avg_req_time > 2.0:
            bottlenecks.append({
                "type": "response_time",
                "metric": f"平均请求响应时间 {avg_req_time:.2f}秒",
                "threshold": "2.0秒",
                "risk_level": "medium",
                "description": "请求响应时间过长，影响用户体验"
            })

        return {
            "bottlenecks": bottlenecks,
            "performance_metrics": self.performance_metrics
        }

    def evaluate_config(self) -> Dict:
        """Score the loaded config (0-100) and list per-directive issues.

        Each tracked directive contributes its ``score_weight`` when it is
        configured and acceptable; unconfigured, out-of-range, far-from-
        optimal or malformed values are reported as issues instead.
        """
        score = 0.0
        total_weight = sum(v["score_weight"] for v in self.optimal_configs.values())
        issues: List[Dict] = []

        for key, config_info in self.optimal_configs.items():
            current_value = self.config_data.get(key)
            if current_value is None:
                issues.append({
                    "parameter": key,
                    "current_value": "未配置",
                    "optimal_value": config_info["optimal"],
                    "description": config_info["description"],
                    "risk_level": "medium"
                })
                continue

            # Numeric directive: check range, then distance from optimal.
            if "range" in config_info:
                try:
                    current_num = float(current_value)
                    optimal_num = float(config_info["optimal"])
                    min_val, max_val = config_info["range"]

                    if not (min_val <= current_num <= max_val):
                        issues.append({
                            "parameter": key,
                            "current_value": current_value,
                            "optimal_value": f"{config_info['optimal']} (范围: {min_val}-{max_val})",
                            "description": config_info["description"],
                            "risk_level": "high"
                        })
                    elif abs(current_num - optimal_num) > (max_val - min_val) * 0.3:
                        # In range but more than 30% of the span away from
                        # the recommended value.
                        issues.append({
                            "parameter": key,
                            "current_value": current_value,
                            "optimal_value": config_info["optimal"],
                            "description": config_info["description"],
                            "risk_level": "medium"
                        })
                    else:
                        score += config_info["score_weight"]
                except ValueError:
                    issues.append({
                        "parameter": key,
                        "current_value": current_value,
                        "optimal_value": config_info["optimal"],
                        "description": "参数格式错误，应为数值",
                        "risk_level": "high"
                    })
            # Boolean/option directive: case-insensitive exact match.
            else:
                if current_value.lower() != config_info["optimal"].lower():
                    issues.append({
                        "parameter": key,
                        "current_value": current_value,
                        "optimal_value": config_info["optimal"],
                        "description": config_info["description"],
                        "risk_level": "medium"
                    })
                else:
                    score += config_info["score_weight"]

        # Normalise the accumulated weight to a 0-100 score.
        config_score = int((score / total_weight) * 100) if total_weight > 0 else 0

        return {
            "config_score": config_score,
            "issues": issues,
            "optimal_configs": self.optimal_configs
        }


if __name__ == "__main__":
    optimizer = ConfigOptimizer()
    print("收集性能指标...")
    optimizer.collect_performance_metrics()

    print("\n检测性能瓶颈...")
    bottlenecks = optimizer.detect_performance_bottlenecks()
    print(json.dumps(bottlenecks, indent=2, ensure_ascii=False))

    print("\n评估配置优化建议...")
    config_evaluation = optimizer.evaluate_config()
    print(json.dumps(config_evaluation, indent=2, ensure_ascii=False))