import os
import time
import json
import numpy as np
from datetime import datetime, timedelta
from typing import Dict, List, Any, Union, Optional
import pickle
import yaml

# Import the database access layer
from mysql_database import KylinDatabase

# Import all ML algorithms
from detector import (
    IsolationForest, OneClassSVM, Autoencoder,
    FeatureEngineering, SimpleLSTM, SimpleDBSCAN,
    Color
)


# Additional advanced model algorithms
class EnhancedIsolationForest:
    """Enhanced isolation forest with dynamic feature weights and an adaptive threshold.

    Fixes over the previous implementation:

    - ``decision_function`` now returns the standard isolation-forest anomaly
      score ``2 ** (-E[h(x)] / c(n))``, where *higher* means *more anomalous*.
      The old version returned the raw average path length, whose ordering is
      inverted (anomalies isolate in *fewer* splits), so thresholding its top
      percentile flagged the most normal points as anomalies.
    - Leaf nodes contribute the unbuilt-subtree adjustment ``c(size)`` to the
      path length, as in the original Isolation Forest paper.
    - ``decision_function`` applies the same feature weights used during
      training, so fitting and scoring operate on the same representation
      (previously trees were built on weighted data but scored on raw data).
    """

    # Euler–Mascheroni constant, used by the c(n) path-length normalizer.
    _EULER = 0.5772156649015329

    def __init__(self, n_estimators: int = 100, max_samples: int = 256, contamination: float = 0.1):
        self.n_estimators = n_estimators        # number of isolation trees
        self.max_samples = max_samples          # per-tree subsample cap
        self.contamination = contamination      # expected fraction of anomalies
        self.trees: List[Dict] = []
        self.threshold_: Optional[float] = None  # anomaly-score cutoff set by fit()
        self.feature_weights = None              # per-feature scaling learned by fit()
        self.adaptive_threshold = True           # kept for interface compatibility
        self.performance_history = []            # statistics recorded after each fit()
        self._sample_size: Optional[int] = None  # actual subsample size (input to c(n))

    @staticmethod
    def _average_path_length(n: int) -> float:
        """c(n): average path length of an unsuccessful BST search over n points."""
        if n <= 1:
            return 0.0
        if n == 2:
            return 1.0
        harmonic = np.log(n - 1) + EnhancedIsolationForest._EULER
        return 2.0 * harmonic - 2.0 * (n - 1) / n

    def fit(self, X: Union[List[List[float]], np.ndarray]) -> 'EnhancedIsolationForest':
        """Fit the forest and set the anomaly-score threshold.

        Raises:
            ValueError: if no tree could be built at all.
        """
        X = np.asarray(X, dtype=float)

        try:
            # Sanitize NaN/Inf before any statistics are computed.
            if np.any(np.isnan(X)) or np.any(np.isinf(X)):
                X = np.nan_to_num(X, nan=0.0, posinf=0.0, neginf=0.0)

            # Learn per-feature importance weights from the data.
            self.feature_weights = self._calculate_feature_importance(X)

            # Train on the weighted representation.
            X_weighted = X * self.feature_weights.reshape(1, -1)

            if np.any(np.isnan(X_weighted)) or np.any(np.isinf(X_weighted)):
                print("⚠️  加权后数据包含无效值，使用原始数据")
                # Fall back to identity weights so that scoring (which also
                # applies the weights) stays consistent with the raw data
                # the trees are actually built from.
                self.feature_weights = np.ones(X.shape[1])
                X_weighted = X

            n_samples = X_weighted.shape[0]
            max_samples = min(self.max_samples, n_samples)
            self._sample_size = max_samples  # needed by decision_function's c(n)

            # Build the forest on random subsamples.
            self.trees = []
            for i in range(self.n_estimators):
                try:
                    sample_indices = np.random.choice(n_samples, max_samples, replace=False)
                    tree = self._build_tree(X_weighted[sample_indices])
                    self.trees.append(tree)
                except Exception as e:
                    print(f"⚠️  第 {i + 1} 棵树构建失败: {e}，跳过")
                    continue

            if not self.trees:
                raise ValueError("没有成功构建任何树")

            # Scores are in (0, 1] with higher = more anomalous, so the top
            # `contamination` fraction of training points lies above this
            # percentile.
            scores = self.decision_function(X)
            if len(scores) > 0:
                self.threshold_ = np.percentile(scores, 100 * (1 - self.contamination))
            else:
                self.threshold_ = 0.5

            # Record training statistics for later inspection.
            self.performance_history.append({
                'timestamp': datetime.now(),
                'mean_score': np.mean(scores) if len(scores) > 0 else 0.0,
                'std_score': np.std(scores) if len(scores) > 0 else 0.0,
                'contamination': self.contamination
            })

        except Exception as e:
            print(f"❌ 增强版孤立森林训练失败: {e}")
            raise

        return self

    def _build_tree(self, X: np.ndarray, depth: int = 0, max_depth: Optional[int] = None) -> Dict:
        """Recursively build one isolation tree over subsample X."""
        if max_depth is None:
            # Standard height limit: ceil(log2(subsample size)).
            max_depth = int(np.ceil(np.log2(X.shape[0])))

        if depth >= max_depth or X.shape[0] <= 1:
            return {'left': None, 'right': None, 'size': X.shape[0]}

        n_features = X.shape[1]
        feature_idx = np.random.randint(n_features)
        feature_vals = X[:, feature_idx]
        min_val, max_val = feature_vals.min(), feature_vals.max()

        # A constant feature cannot be split; stop here.
        if min_val == max_val:
            return {'left': None, 'right': None, 'size': X.shape[0]}

        split_val = np.random.uniform(min_val, max_val)
        left_mask = feature_vals < split_val
        right_mask = ~left_mask

        return {
            'feature_idx': feature_idx,
            'split_val': split_val,
            'left': self._build_tree(X[left_mask], depth + 1, max_depth),
            'right': self._build_tree(X[right_mask], depth + 1, max_depth),
            'size': X.shape[0]
        }

    def _path_length(self, x: np.ndarray, tree: Dict) -> float:
        """Path length of sample x in one tree, with the c(size) leaf adjustment."""
        if tree['left'] is None and tree['right'] is None:
            # External node: account for the average depth of the subtree
            # that was not built below this leaf.
            return self._average_path_length(tree['size'])

        if 'feature_idx' not in tree:
            return self._average_path_length(tree.get('size', 1))

        if x[tree['feature_idx']] < tree['split_val']:
            return 1.0 + self._path_length(x, tree['left'])
        return 1.0 + self._path_length(x, tree['right'])

    def decision_function(self, X: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """Return anomaly scores in (0, 1]; larger means more anomalous."""
        X = np.asarray(X, dtype=float)
        if X.ndim == 1:
            X = X.reshape(1, -1)

        # Score in the same weighted space the trees were trained in.
        if self.feature_weights is not None and X.shape[1] == len(self.feature_weights):
            X = X * np.asarray(self.feature_weights).reshape(1, -1)

        c_n = self._average_path_length(self._sample_size or self.max_samples)
        scores = np.zeros(len(X))

        for i, x in enumerate(X):
            if not self.trees:
                scores[i] = 0.5  # untrained forest: neutral score
                continue
            avg_path = float(np.mean([self._path_length(x, tree) for tree in self.trees]))
            # Standard normalization: short paths -> score near 1 (anomalous).
            scores[i] = 2.0 ** (-avg_path / c_n) if c_n > 0 else 0.5

        return scores

    def predict(self, X: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """Predict anomalies: 1 = anomaly, 0 = normal."""
        if self.threshold_ is None:
            raise ValueError("模型尚未训练，请先调用fit方法")
        scores = self.decision_function(X)
        return np.where(scores > self.threshold_, 1, 0)

    def _calculate_feature_importance(self, X: np.ndarray) -> np.ndarray:
        """Compute per-feature weights from variance and inter-feature correlation."""
        n_features = X.shape[1]
        importance = np.ones(n_features)

        try:
            # Variance-based importance.
            variances = np.var(X, axis=0)

            # Guard zero-variance features against a degenerate weight of 0.
            zero_variance_mask = variances == 0
            if np.any(zero_variance_mask):
                print(f"⚠️  发现 {np.sum(zero_variance_mask)} 个零方差特征，设置最小权重")
                variances[zero_variance_mask] = 1e-8

            importance = variances / (np.sum(variances) + 1e-8)

            # Correlation-based importance: highly correlated (redundant)
            # features are down-weighted.
            try:
                correlations = np.corrcoef(X.T)
                np.fill_diagonal(correlations, 0)
                correlation_importance = 1 - np.mean(np.abs(correlations), axis=1)

                correlation_importance = np.nan_to_num(correlation_importance, nan=0.5, posinf=1.0, neginf=0.0)
                correlation_importance = np.clip(correlation_importance, 0.1, 1.0)

                importance = importance * correlation_importance
            except Exception as e:
                print(f"⚠️  相关性计算失败: {e}，使用方差权重")

            # Normalize, then scrub any residual invalid values.
            importance = importance / (np.sum(importance) + 1e-8)
            importance = np.nan_to_num(importance, nan=1.0 / n_features, posinf=1.0, neginf=0.0)
            importance = np.clip(importance, 1e-8, 1.0)
            importance = importance / np.sum(importance)

        except Exception as e:
            print(f"⚠️  特征重要性计算失败: {e}，使用均匀权重")
            importance = np.ones(n_features) / n_features

        return importance

    def predict_with_confidence(self, X: Union[List[List[float]], np.ndarray]) -> tuple:
        """Predict anomalies and a [0, 1] confidence per sample.

        Confidence is the (range-normalized) distance of the score from the
        decision threshold.
        """
        if self.threshold_ is None:
            raise ValueError("模型尚未训练，请先调用fit方法")

        scores = self.decision_function(X)
        predictions = np.where(scores > self.threshold_, 1, 0)

        confidence = np.abs(scores - self.threshold_) / (np.max(scores) - np.min(scores) + 1e-8)
        confidence = np.clip(confidence, 0, 1)

        return predictions, confidence


class EnsembleAnomalyDetector:
    """Ensemble anomaly detector: weighted average of member model scores.

    Fix over the previous implementation: ``predict`` no longer calls every
    member's ``predict`` and collects the results into a dict that was never
    read — that dead work is dropped.  The ensemble decision (weighted mean
    of ``decision_function`` scores thresholded at ``ensemble_threshold``)
    is unchanged.
    """

    def __init__(self, models: Dict[str, Any] = None):
        self.models = models or {}       # name -> model instance
        self.model_weights = {}          # name -> voting weight (default 1.0)
        self.ensemble_threshold = 0.5    # anomaly cutoff on the averaged score

    def add_model(self, name: str, model: Any, weight: float = 1.0):
        """Register a model (and its voting weight) in the ensemble."""
        self.models[name] = model
        self.model_weights[name] = weight

    def fit(self, X: Union[List[List[float]], np.ndarray]) -> 'EnsembleAnomalyDetector':
        """Fit every member model that exposes a ``fit`` method."""
        X = np.asarray(X)

        for name, model in self.models.items():
            print(f"训练模型: {name}")
            if hasattr(model, 'fit'):
                model.fit(X)

        return self

    def predict(self, X: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """Predict anomalies (1) via a weighted average of member scores.

        Models without a ``decision_function`` are ignored; if no model
        contributes a score, all samples are classified normal (score 0).
        """
        X = np.asarray(X)

        ensemble_scores = np.zeros(X.shape[0])
        total_weight = 0.0

        for name, model in self.models.items():
            if hasattr(model, 'decision_function'):
                weight = self.model_weights.get(name, 1.0)
                ensemble_scores += model.decision_function(X) * weight
                total_weight += weight

        if total_weight > 0:
            ensemble_scores /= total_weight

        return np.where(ensemble_scores > self.ensemble_threshold, 1, 0)


class AdvancedAutoencoder:
    """Multi-layer autoencoder with dropout and L2 regularization.

    Fixes over the previous implementation:

    - ``fit`` now actually performs gradient-descent weight updates via
      backpropagation.  The old loop computed the loss every epoch but never
      modified a single weight, so ``lr`` was ignored and the network stayed
      at its random initialization.
    - The final decoder layer is linear instead of sigmoid: the network
      reconstructs *standardized* (zero-mean) data, which a sigmoid output
      bounded to (0, 1) can never represent for negative values.
    """

    def __init__(self, input_dim: int, hidden_dims: List[int] = None,
                 epochs: int = 100, lr: float = 0.0005, dropout_rate: float = 0.1):
        # Default architecture scaled to the input size, avoiding
        # degenerate (too small) bottleneck dimensions.
        if hidden_dims is None:
            if input_dim >= 50:
                hidden_dims = [min(32, input_dim // 2), min(16, input_dim // 4), min(8, input_dim // 8)]
            else:
                hidden_dims = [min(16, input_dim // 2), min(8, input_dim // 4)]
        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.epochs = epochs
        self.lr = lr                      # gradient-descent step size
        self.dropout_rate = dropout_rate  # applied to hidden activations only
        self.encoder_weights = []
        self.encoder_biases = []
        self.decoder_weights = []
        self.decoder_biases = []
        self.mean = None        # per-feature mean learned by fit()
        self.std = None         # per-feature std learned by fit()
        self.threshold_ = None  # reconstruction-error anomaly threshold
        self._initialize_advanced_architecture()

    def _initialize_advanced_architecture(self):
        """He-initialize a symmetric encoder/decoder weight stack."""
        # Encoder layers.
        prev_dim = self.input_dim
        for hidden_dim in self.hidden_dims:
            if hidden_dim > 0:  # skip degenerate zero-width layers
                self.encoder_weights.append(
                    np.random.randn(prev_dim, hidden_dim) * np.sqrt(2.0 / prev_dim)
                )
                self.encoder_biases.append(np.zeros(hidden_dim))
                prev_dim = hidden_dim

        # Decoder layers mirror the encoder, ending back at input_dim.
        decoder_dims = list(reversed(self.hidden_dims[:-1])) + [self.input_dim]
        for hidden_dim in decoder_dims:
            if hidden_dim > 0:
                self.decoder_weights.append(
                    np.random.randn(prev_dim, hidden_dim) * np.sqrt(2.0 / prev_dim)
                )
                self.decoder_biases.append(np.zeros(hidden_dim))
                prev_dim = hidden_dim

    def _apply_dropout(self, x: np.ndarray, training: bool = True) -> np.ndarray:
        """Inverted dropout: scale kept units so expectation is unchanged."""
        if training and self.dropout_rate > 0:
            mask = np.random.binomial(1, 1 - self.dropout_rate, size=x.shape) / (1 - self.dropout_rate)
            return x * mask
        return x

    def sigmoid(self, x: np.ndarray) -> np.ndarray:
        """Numerically clipped sigmoid activation."""
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def fit(self, X: Union[List[List[float]], np.ndarray]) -> 'AdvancedAutoencoder':
        """Train by full-batch gradient descent with early stopping.

        Raises:
            ValueError: if X's feature count differs from ``input_dim``.
        """
        X = np.asarray(X, dtype=float)

        if X.shape[1] != self.input_dim:
            raise ValueError(f"输入维度不匹配：期望 {self.input_dim}，实际 {X.shape[1]}")

        # Standardize; the small epsilon guards constant features.
        self.mean = np.mean(X, axis=0)
        self.std = np.std(X, axis=0) + 1e-6
        X_norm = (X - self.mean) / self.std

        # Treat encoder + decoder as one sequential stack for training.
        weights = self.encoder_weights + self.decoder_weights
        biases = self.encoder_biases + self.decoder_biases
        n_layers = len(weights)
        l2_coef = 0.0001  # L2 regularization coefficient

        best_loss = float('inf')
        patience = 0
        max_patience = 20  # early-stopping patience in epochs

        for epoch in range(self.epochs):
            try:
                # ---- Forward pass (keep intermediates for backprop) ----
                activations = [X_norm]  # post-dropout output of each layer
                pre_drop = []           # pre-dropout activations (for derivatives)
                masks = []              # dropout masks per layer
                a = X_norm
                for i in range(n_layers):
                    z = a @ weights[i] + biases[i]
                    if i < n_layers - 1:
                        a = self.sigmoid(z)
                        pre_drop.append(a)
                        if self.dropout_rate > 0:
                            mask = np.random.binomial(
                                1, 1 - self.dropout_rate, size=a.shape
                            ) / (1 - self.dropout_rate)
                        else:
                            mask = 1.0
                        a = a * mask
                    else:
                        a = z  # linear output layer (reconstructs zero-mean data)
                        pre_drop.append(a)
                        mask = 1.0
                    masks.append(mask)
                    activations.append(a)

                diff = activations[-1] - X_norm
                reconstruction_error = np.mean(diff ** 2)
                l2_regularization = l2_coef * sum(np.sum(w ** 2) for w in weights)
                total_loss = reconstruction_error + l2_regularization

                # Early-stopping bookkeeping.
                if total_loss < best_loss:
                    best_loss = total_loss
                    patience = 0
                else:
                    patience += 1

                if epoch % 10 == 0:
                    print(f"Epoch {epoch}: Loss = {total_loss:.6f} (Best: {best_loss:.6f})")

                if patience >= max_patience:
                    print(f"早停：{max_patience}个epoch没有改善，停止训练")
                    break

                # ---- Backward pass (this was entirely missing before) ----
                grad = (2.0 / diff.size) * diff  # dL/d(output); output is linear
                for i in range(n_layers - 1, -1, -1):
                    if i < n_layers - 1:
                        # Back through dropout, then the sigmoid derivative.
                        grad = grad * masks[i]
                        grad = grad * pre_drop[i] * (1.0 - pre_drop[i])
                    grad_w = activations[i].T @ grad + 2.0 * l2_coef * weights[i]
                    grad_b = grad.sum(axis=0)
                    grad = grad @ weights[i].T  # propagate to the previous layer
                    weights[i] -= self.lr * grad_w
                    biases[i] -= self.lr * grad_b

            except Exception as e:
                print(f"训练过程中出错: {e}")
                continue

        # Anomaly threshold: 90th percentile of training reconstruction errors.
        try:
            reconstructions = self.reconstruct(X)
            errors = np.mean(np.square(X - reconstructions), axis=1)
            self.threshold_ = np.percentile(errors, 90)
        except Exception as e:
            print(f"设置异常阈值时出错: {e}")
            self.threshold_ = 0.1  # fallback default

        return self

    def _encode(self, X: np.ndarray, training: bool = False) -> np.ndarray:
        """Encoder forward pass (all encoder layers use sigmoid)."""
        encoded = X
        for weights, bias in zip(self.encoder_weights, self.encoder_biases):
            encoded = np.dot(encoded, weights) + bias
            encoded = self.sigmoid(encoded)
            encoded = self._apply_dropout(encoded, training)
        return encoded

    def _decode(self, encoded: np.ndarray, training: bool = False) -> np.ndarray:
        """Decoder forward pass; the final layer is linear (see class docs)."""
        decoded = encoded
        last = len(self.decoder_weights) - 1
        for i, (weights, bias) in enumerate(zip(self.decoder_weights, self.decoder_biases)):
            decoded = np.dot(decoded, weights) + bias
            if i < last:
                decoded = self.sigmoid(decoded)
                decoded = self._apply_dropout(decoded, training)
        return decoded

    def reconstruct(self, X: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """Reconstruct input in the original (un-standardized) space.

        Raises:
            ValueError: if called before ``fit``.
        """
        if self.mean is None or self.std is None:
            raise ValueError("模型尚未训练，请先调用fit方法")

        X = np.asarray(X, dtype=float)
        X_norm = (X - self.mean) / self.std
        encoded = self._encode(X_norm, training=False)
        decoded = self._decode(encoded, training=False)
        return decoded * self.std + self.mean

    def predict(self, X: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """Predict anomalies (1) by thresholding per-sample reconstruction error.

        Raises:
            ValueError: if called before ``fit``.
        """
        if self.threshold_ is None:
            raise ValueError("模型尚未训练，请先调用fit方法")

        X = np.asarray(X, dtype=float)
        reconstructions = self.reconstruct(X)
        errors = np.mean(np.square(X - reconstructions), axis=1)
        return np.where(errors > self.threshold_, 1, 0)


class AdvancedFeatureEngineering(FeatureEngineering):
    """Advanced feature engineering: temporal, frequency and statistical features.

    Fixes over the previous implementation:

    - ``extract_frequency_features`` guards against an all-zero magnitude
      spectrum (the spectral centroid would otherwise divide by zero) and
      against windows too small to yield a dominant frequency (the old
      ``argmax`` crashed on an empty slice for window_size < 4).
    - Removed the unused ``day_of_week`` computation in
      ``extract_temporal_features``.
    """

    @staticmethod
    def extract_temporal_features(timestamps: List[datetime], values: List[float]) -> Dict[str, float]:
        """Extract timing-pattern features; empty dict if fewer than 2 samples."""
        if len(timestamps) < 2:
            return {}

        # Seconds between consecutive samples.
        intervals = [
            (timestamps[i] - timestamps[i - 1]).total_seconds()
            for i in range(1, len(timestamps))
        ]

        hour_of_day = [ts.hour for ts in timestamps]
        is_weekend = [1 if ts.weekday() >= 5 else 0 for ts in timestamps]

        return {
            'avg_interval': np.mean(intervals),
            'std_interval': np.std(intervals),
            'hour_pattern': np.mean(hour_of_day),
            'weekend_ratio': np.mean(is_weekend),
            'time_variance': np.var(values)
        }

    @staticmethod
    def extract_frequency_features(values: List[float], window_size: int = 10) -> Dict[str, float]:
        """Extract FFT-based features over the trailing window.

        Returns an empty dict when there are fewer than ``window_size``
        samples or the window is too small (< 4) for a meaningful spectrum.
        """
        if len(values) < window_size or window_size < 4:
            return {}

        fft_values = np.fft.fft(values[-window_size:])
        magnitude_spectrum = np.abs(fft_values)
        # Epsilon keeps the centroid finite for an all-zero window.
        total_magnitude = np.sum(magnitude_spectrum) + 1e-8

        return {
            # Skip the DC component (index 0) when locating the dominant bin.
            'dominant_frequency': np.argmax(magnitude_spectrum[1:window_size // 2]) + 1,
            'spectral_energy': np.sum(magnitude_spectrum ** 2),
            'spectral_entropy': -np.sum(magnitude_spectrum * np.log(magnitude_spectrum + 1e-8)),
            'spectral_centroid': np.sum(
                np.arange(len(magnitude_spectrum)) * magnitude_spectrum
            ) / total_magnitude
        }

    @staticmethod
    def extract_statistical_features(values: List[float]) -> Dict[str, float]:
        """Extract distribution-shape statistics; empty dict if fewer than 2 samples."""
        if len(values) < 2:
            return {}

        values_array = np.array(values)

        return {
            'mean': np.mean(values_array),
            'std': np.std(values_array),
            'skewness': AdvancedFeatureEngineering._calculate_skewness(values_array),
            'kurtosis': AdvancedFeatureEngineering._calculate_kurtosis(values_array),
            'range': np.max(values_array) - np.min(values_array),
            'iqr': np.percentile(values_array, 75) - np.percentile(values_array, 25),
            # Coefficient of variation; epsilon guards a zero mean.
            'cv': np.std(values_array) / (np.mean(values_array) + 1e-8)
        }

    @staticmethod
    def _calculate_skewness(values: np.ndarray) -> float:
        """Sample skewness; 0 for a constant series."""
        mean = np.mean(values)
        std = np.std(values)
        if std == 0:
            return 0
        return np.mean(((values - mean) / std) ** 3)

    @staticmethod
    def _calculate_kurtosis(values: np.ndarray) -> float:
        """Excess kurtosis (normal distribution -> 0); 0 for a constant series."""
        mean = np.mean(values)
        std = np.std(values)
        if std == 0:
            return 0
        return np.mean(((values - mean) / std) ** 4) - 3


class ModelTrainerOptimized:
    """优化的模型训练器 - 专注于核心功能"""

    def __init__(self, model_dir: str = "trained_models", use_database: bool = True):
        """Initialize the trainer.

        Args:
            model_dir: Directory where trained models are persisted.
            use_database: Prefer MySQL-backed storage for training data.
                Falls back to in-memory/local storage when the connection
                fails (``use_database`` is then flipped to False).
        """
        self.model_dir = model_dir
        self.training_data: List[Dict] = []  # in-memory ring of recent samples
        self.use_database = use_database

        # Set up the database connection; credentials come from
        # KylinDatabase's own configuration (no hard-coded secrets here).
        if self.use_database:
            self.db = KylinDatabase()
            if not self.db.connect():
                print("警告: 数据库连接失败，将使用本地文件存储")
                self.use_database = False
        else:
            self.db = None

        self.ensure_model_directory()

        # Advanced-model helpers (populated during training).
        self.enhanced_models = {}
        self.ensemble_detector = EnsembleAnomalyDetector()
        self.feature_engineering = AdvancedFeatureEngineering()

    def ensure_model_directory(self):
        """确保模型目录存在"""
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
            print(f"创建模型目录: {self.model_dir}")

    def collect_training_data(self, metrics: Dict, duration_hours: int = 2):  # 改为2小时，避免数据过早删除
        """收集训练数据"""
        timestamp = datetime.now()
        features = self._extract_features_for_training(metrics)
        data_point = {
            'timestamp': timestamp,
            'metrics': metrics,
            'features': features
        }

        # 保存到数据库
        if self.use_database and self.db:
            if self.db.save_training_data(metrics, features, timestamp):
                print(f"✓ 训练数据已保存到数据库")
            else:
                print(f"✗ 保存训练数据到数据库失败")

        # 同时保存到内存（用于快速访问）
        self.training_data.append(data_point)

        # 清理旧数据（保留指定时长）
        cutoff_time = timestamp - timedelta(hours=duration_hours)
        self.training_data = [d for d in self.training_data if d['timestamp'] > cutoff_time]

        # 显示内存和数据库中的数据量
        memory_count = len(self.training_data)
        if self.use_database and self.db:
            try:
                db_data = self.db.get_training_data(hours=24)  # 获取24小时内的数据
                db_count = len(db_data)
                print(f"收集训练数据点，内存数据量: {memory_count}, 数据库总数据量: {db_count}")
            except Exception as e:
                print(f"收集训练数据点，当前数据量: {memory_count} (数据库查询失败: {e})")
        else:
            print(f"收集训练数据点，当前数据量: {memory_count}")

    def _extract_features_for_training(self, metrics: Dict) -> Dict:
        """为训练提取特征（与kylin_detector.py保持一致）"""
        features = {}

        # 基础指标特征（固定特征集，确保维度一致）
        features['cpu_usage'] = metrics['cpu']['usage']
        features['cpu_user'] = metrics['cpu'].get('user', 0)
        features['cpu_system'] = metrics['cpu'].get('system', 0)
        features['cpu_idle'] = metrics['cpu'].get('idle', 0)
        features['cpu_iowait'] = metrics['cpu'].get('iowait', 0)
        features['cpu_irq'] = metrics['cpu'].get('irq', 0)
        features['cpu_softirq'] = metrics['cpu'].get('softirq', 0)
        features['cpu_steal'] = metrics['cpu'].get('steal', 0)
        features['cpu_guest'] = metrics['cpu'].get('guest', 0)
        features['memory_usage'] = metrics['memory']['usage']
        features['memory_available'] = metrics['memory'].get('available', 0)
        features['memory_used'] = metrics['memory'].get('used', 0)
        features['memory_free'] = metrics['memory'].get('free', 0)
        features['memory_cached'] = metrics['memory'].get('cached', 0)
        features['memory_buffers'] = metrics['memory'].get('buffers', 0)
        features['memory_swap_used'] = metrics['memory'].get('swap_used', 0)
        features['disk_usage'] = metrics['disk']['usage']
        features['disk_read_bytes'] = metrics['disk'].get('read_bytes', 0)
        features['disk_write_bytes'] = metrics['disk'].get('write_bytes', 0)
        features['disk_read_count'] = metrics['disk'].get('read_count', 0)
        features['disk_write_count'] = metrics['disk'].get('write_count', 0)
        features['disk_read_time'] = metrics['disk'].get('read_time', 0)
        features['disk_write_time'] = metrics['disk'].get('write_time', 0)
        features['network_in'] = metrics['network']['recv_rate']
        features['network_out'] = metrics['network']['sent_rate']
        features['network_packets_in'] = metrics['network'].get('packets_recv', 0)
        features['network_packets_out'] = metrics['network'].get('packets_sent', 0)
        features['network_err_in'] = metrics['network'].get('errin', 0)
        features['network_err_out'] = metrics['network'].get('errout', 0)
        features['network_drop_in'] = metrics['network'].get('dropin', 0)
        features['network_drop_out'] = metrics['network'].get('dropout', 0)

        # 数据库详细指标特征
        features['mysql_connections'] = metrics['services']['mysql']['connections']
        features['mysql_slow_queries'] = metrics['services']['mysql']['slow_queries']
        features['mysql_threads_connected'] = metrics['services']['mysql'].get('threads_connected', 0)
        features['mysql_threads_running'] = metrics['services']['mysql'].get('threads_running', 0)
        features['mysql_uptime'] = metrics['services']['mysql'].get('uptime', 0)
        features['mysql_questions'] = metrics['services']['mysql'].get('questions', 0)
        features['mysql_slow_queries'] = metrics['services']['mysql'].get('slow_queries', 0)
        features['mysql_opens'] = metrics['services']['mysql'].get('opens', 0)
        features['mysql_flush_commands'] = metrics['services']['mysql'].get('flush_commands', 0)

        # Redis详细指标特征
        features['redis_connected_clients'] = metrics['services']['redis'].get('connected_clients', 0)
        features['redis_used_memory'] = metrics['services']['redis'].get('used_memory', 0)
        features['redis_total_commands'] = metrics['services']['redis'].get('total_commands_processed', 0)
        features['redis_keyspace_hits'] = metrics['services']['redis'].get('keyspace_hits', 0)
        features['redis_keyspace_misses'] = metrics['services']['redis'].get('keyspace_misses', 0)
        features['redis_evicted_keys'] = metrics['services']['redis'].get('evicted_keys', 0)
        features['redis_expired_keys'] = metrics['services']['redis'].get('expired_keys', 0)

        # Web服务详细指标特征
        features['nginx_active_connections'] = metrics['services']['nginx'].get('active_connections', 0)
        features['nginx_5xx_errors'] = metrics['services']['nginx'].get('5xx_errors', 0)
        features['nginx_requests_per_second'] = metrics['services']['nginx'].get('requests_per_second', 0)
        features['nginx_4xx_errors'] = metrics['services']['nginx'].get('4xx_errors', 0)
        features['nginx_total_requests'] = metrics['services']['nginx'].get('total_requests', 0)

        features['apache_5xx_errors'] = metrics['services']['apache'].get('5xx_errors', 0)
        features['apache_requests_per_second'] = metrics['services']['apache'].get('requests_per_second', 0)

        # 系统日志相关特征
        features['error_log_count'] = metrics.get('error_log_count', 0)
        features['warning_log_count'] = metrics.get('warning_log_count', 0)
        features['critical_log_count'] = metrics.get('critical_log_count', 0)

        # 服务状态特征（转换为数值）
        features['mysql_running'] = 1 if metrics['services']['mysql']['running'] else 0
        features['redis_running'] = 1 if metrics['services']['redis']['running'] else 0
        features['nginx_running'] = 1 if metrics['services']['nginx']['running'] else 0
        features['apache_running'] = 1 if metrics['services']['apache']['running'] else 0

        # 添加NPU特征（如果可用）- 限制为最多2个NPU设备
        if metrics['npu']['available']:
            for i, device in enumerate(metrics['npu']['devices'][:2]):  # 最多2个NPU设备
                features[f'npu_{i}_usage'] = device['usage']
                features[f'npu_{i}_temp'] = device['temperature']
                features[f'npu_{i}_mem_usage'] = device['memory_usage']
                features[f'npu_{i}_power'] = device.get('power_usage', 0.0)
                features[f'npu_{i}_frequency'] = device.get('frequency', 0.0)
        else:
            # 如果没有NPU，添加默认值
            for i in range(2):
                features[f'npu_{i}_usage'] = 0.0
                features[f'npu_{i}_temp'] = 0.0
                features[f'npu_{i}_mem_usage'] = 0.0
                features[f'npu_{i}_power'] = 0.0
                features[f'npu_{i}_frequency'] = 0.0

        # 滑动窗口特征（限制数量）
        for key in ['cpu_usage', 'memory_usage', 'disk_usage']:
            # 使用当前值作为默认值（因为没有历史数据）
            features[f"{key}_window_mean"] = features[key]
            features[f"{key}_window_std"] = 0.0
            features[f"{key}_window_max"] = features[key]
            features[f"{key}_window_min"] = features[key]
            features[f"{key}_window_range"] = 0.0
            features[f"{key}_window_trend"] = 0.0

        # 周期性特征（限制数量）
        for key in ['cpu_usage', 'memory_usage']:
            features[f"{key}_periodic_deviation"] = 0.0

        # 交叉特征（限制数量，避免特征爆炸）
        # 只保留最重要的交叉特征
        features['cross_cpu_usage_memory_usage'] = features['cpu_usage'] * features['memory_usage']
        features['cross_cpu_usage_disk_usage'] = features['cpu_usage'] * features['disk_usage']
        features['cross_memory_usage_disk_usage'] = features['memory_usage'] * features['disk_usage']
        features['cross_mysql_connections_redis_connected_clients'] = features['mysql_connections'] * features[
            'redis_connected_clients']
        features['cross_nginx_active_connections_apache_requests_per_second'] = features['nginx_active_connections'] * \
                                                                                features['apache_requests_per_second']

        return features

    def has_sufficient_data(self, min_samples: int = 720) -> bool:  # 1小时数据 (1*60*60/5=720)
        """检查是否有足够的训练数据"""
        if self.use_database and self.db:
            # 从数据库获取数据量
            db_data = self.db.get_training_data(hours=1)  # 改为1小时
            recent_count = len(db_data)

            # 调试输出：显示最近1小时的数据量
            if recent_count < min_samples:
                print(f"⚠️  最近1小时数据量: {recent_count}, 需要: {min_samples}")

            return recent_count >= min_samples
        else:
            # 使用内存中的数据
            return len(self.training_data) >= min_samples

    def train_models(self, model_version: str = None, use_extended_data: bool = False) -> Dict:
        """训练所有模型"""
        if not self.has_sufficient_data() and not use_extended_data:
            if self.use_database and self.db:
                db_data = self.db.get_training_data(hours=1)  # 改为1小时
                raise ValueError(f"训练数据不足，需要至少720个样本（1小时），当前只有{len(db_data)}个")
            else:
                raise ValueError(f"训练数据不足，需要至少720个样本（1小时），当前只有{len(self.training_data)}个")

        # 获取训练数据
        if self.use_database and self.db:
            if use_extended_data:
                # 使用扩展数据（24小时）
                training_data = self.db.get_training_data(hours=24)
                print(f"从数据库获取扩展数据{len(training_data)}个训练数据点（24小时）...")
            else:
                # 使用标准数据（1小时）
                training_data = self.db.get_training_data(hours=1)
                print(f"从数据库获取{len(training_data)}个训练数据点（1小时）...")
        else:
            training_data = self.training_data
            print(f"使用内存中{len(training_data)}个数据点...")

        # 准备训练数据
        feature_data = []
        timestamps = []
        for data_point in training_data:
            features = data_point['features']
            feature_vector = list(features.values())
            feature_data.append(feature_vector)
            timestamps.append(data_point['timestamp'])

        X = np.array(feature_data)

        # 数据清理和验证
        print("开始数据清理和验证...")

        # 1. 检查并处理无效值
        if np.any(np.isnan(X)) or np.any(np.isinf(X)):
            print("⚠️  发现无效值（NaN/Inf），开始清理...")
            # 用0替换NaN和Inf
            X = np.nan_to_num(X, nan=0.0, posinf=0.0, neginf=0.0)
            print(f"✅ 数据清理完成，替换了 {np.sum(np.isnan(X)) + np.sum(np.isinf(X))} 个无效值")

        # 2. 检查数据范围
        print(f"数据形状: {X.shape}")
        print(f"数据范围: [{np.min(X):.4f}, {np.max(X):.4f}]")
        print(f"数据均值: {np.mean(X):.4f}")
        print(f"数据标准差: {np.std(X):.4f}")

        # 3. 检查是否有全零或常数特征
        zero_features = np.sum(np.all(X == 0, axis=0))
        constant_features = np.sum(np.all(X == X[0], axis=0))
        if zero_features > 0:
            print(f"⚠️  发现 {zero_features} 个全零特征")
        if constant_features > 0:
            print(f"⚠️  发现 {constant_features} 个常数特征")

        # 4. 数据标准化（可选，但有助于训练稳定性）
        try:
            from sklearn.preprocessing import StandardScaler
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            print("✅ 数据标准化完成")
            X = X_scaled
        except ImportError:
            print("⚠️  sklearn不可用，跳过标准化")

        # 训练基础模型
        models = {}

        # 1. 增强版孤立森林（可选）
        print("训练增强版孤立森林模型...")
        try:
            enhanced_iso_forest = EnhancedIsolationForest(n_estimators=100, contamination=0.1)
            enhanced_iso_forest.fit(X)
            models['enhanced_isolation_forest'] = enhanced_iso_forest
            print("✅ 增强版孤立森林训练成功")
        except Exception as e:
            print(f"❌ 增强版孤立森林训练失败: {e}")
            print("跳过增强版孤立森林训练（不影响其他模型）")

        # 2. 高级自编码器
        print("训练高级自编码器模型...")
        try:
            input_dim = X.shape[1]
            # 使用简化的隐藏层配置
            hidden_dims = [min(8, input_dim), min(4, input_dim // 2)]

            advanced_autoencoder = AdvancedAutoencoder(
                input_dim=input_dim,
                hidden_dims=hidden_dims,
                epochs=30
            )
            advanced_autoencoder.fit(X)
            models['advanced_autoencoder'] = advanced_autoencoder
            print("高级自编码器训练成功")
        except Exception as e:
            print(f"高级自编码器训练失败: {e}")
            print("跳过高级自编码器训练")

        # 3. 原始模型（保持兼容性）
        print("训练原始模型...")
        try:
            iso_forest = IsolationForest(n_estimators=100, contamination=0.1)
            iso_forest.fit(X)
            models['isolation_forest'] = iso_forest
            print("✅ 原始孤立森林训练成功")
        except Exception as e:
            print(f"❌ 原始孤立森林训练失败: {e}")

        try:
            oc_svm = OneClassSVM(nu=0.1, kernel='rbf')
            oc_svm.fit(X)
            models['one_class_svm'] = oc_svm
            print("✅ 单类SVM训练成功")
        except Exception as e:
            print(f"❌ 单类SVM训练失败: {e}")

        # 确保隐藏层维度不超过输入维度
        hidden_dim = min(8, X.shape[1])
        if hidden_dim > 0:
            try:
                autoencoder = Autoencoder(input_dim=X.shape[1], hidden_dim=hidden_dim, epochs=20)
                autoencoder.fit(X)
                models['autoencoder'] = autoencoder
                print("✅ 原始自编码器训练成功")
            except Exception as e:
                print(f"❌ 原始自编码器训练失败: {e}")
        else:
            print("跳过原始自编码器训练：输入维度太小")

        # 4. LSTM（用于时间序列预测）
        print("训练LSTM模型...")
        try:
            # 简化LSTM配置，避免维度问题
            input_size = min(X.shape[1], 10)  # 限制输入维度
            hidden_size = min(8, input_size)  # 限制隐藏层大小

            lstm = SimpleLSTM(input_size=input_size, hidden_size=hidden_size, output_size=1)

            # 准备LSTM训练数据
            if len(X) > 10:
                for i in range(len(X) - 10):
                    # 只使用前input_size个特征
                    sequence = X[i:i + 10, :input_size].tolist()
                    # 使用第一个特征作为目标
                    target = np.mean(X[i + 10:i + 15, 0]) if i + 15 < len(X) else X[-1, 0]
                    lstm.train_step(sequence, target)

                models['lstm'] = lstm
                print("✅ LSTM模型训练成功")
            else:
                print("跳过LSTM训练：数据量不足")
        except Exception as e:
            print(f"❌ LSTM模型训练失败: {e}")
            print("跳过LSTM训练")

        # 5. 集成模型
        print("训练集成模型...")
        try:
            # 只添加成功训练的模型到集成
            if 'isolation_forest' in models:
                self.ensemble_detector.add_model('iso_forest', models['isolation_forest'], weight=1.0)
            if 'one_class_svm' in models:
                self.ensemble_detector.add_model('oc_svm', models['one_class_svm'], weight=0.8)
            if 'autoencoder' in models:
                self.ensemble_detector.add_model('autoencoder', models['autoencoder'], weight=0.9)
            if 'advanced_autoencoder' in models:
                self.ensemble_detector.add_model('enhanced_autoencoder', models['advanced_autoencoder'], weight=1.1)
            if 'lstm' in models:
                self.ensemble_detector.add_model('lstm', models['lstm'], weight=0.7)
            if 'enhanced_isolation_forest' in models:
                self.ensemble_detector.add_model('enhanced_iso_forest', models['enhanced_isolation_forest'], weight=1.2)

            models['ensemble_detector'] = self.ensemble_detector
            print("✅ 集成模型训练完成")
        except Exception as e:
            print(f"❌ 集成模型训练失败: {e}")
            print("跳过集成模型训练")

        # 检查是否有模型训练成功
        if not models:
            raise ValueError("❌ 所有模型训练都失败了，请检查数据质量")

        print(f"✅ 成功训练了 {len(models)} 个模型")
        for model_name in models.keys():
            print(f"  - {model_name}")

        # 保存模型
        if model_version is None:
            model_version = datetime.now().strftime("%Y%m%d_%H%M%S")

        # 保存到数据库
        if self.use_database and self.db:
            for model_name, model in models.items():
                model_info = {
                    'model_type': model_name,
                    'training_samples': len(training_data),
                    'feature_dim': X.shape[1],
                    'created_at': datetime.now().isoformat()
                }

                if self.db.save_model(model_name, model_version, model, model_info, len(training_data), X.shape[1]):
                    print(f"✓ 模型 {model_name} 已保存到数据库")
                else:
                    print(f"✗ 保存模型 {model_name} 到数据库失败")

        # 同时保存到本地文件（备份）
        self._save_models(models, model_version)

        print(f"✅ 模型训练完成，版本: {model_version}")
        return models

    def _save_models(self, models: Dict, version: str):
        """保存模型到磁盘"""
        model_path = os.path.join(self.model_dir, f"models_v{version}.pkl")

        with open(model_path, 'wb') as f:
            pickle.dump(models, f)

        # 保存模型信息
        info_path = os.path.join(self.model_dir, f"model_info_v{version}.json")
        model_info = {
            'version': version,
            'created_at': datetime.now().isoformat(),
            'models': list(models.keys()),
            'training_samples': len(self.training_data),
            'feature_dim': len(self.training_data[0]['features']) if self.training_data else 0,
            'model_types': {
                'enhanced_isolation_forest': 'Enhanced Isolation Forest',
                'advanced_autoencoder': 'Advanced Autoencoder',
                'isolation_forest': 'Original Isolation Forest',
                'one_class_svm': 'One-Class SVM',
                'autoencoder': 'Original Autoencoder',
                'lstm': 'LSTM for Time Series',
                'ensemble_detector': 'Ensemble Detector'
            }
        }

        with open(info_path, 'w', encoding='utf-8') as f:
            json.dump(model_info, f, ensure_ascii=False, indent=2)

        print(f"模型已保存到: {model_path}")

    def load_models(self, version: Optional[str] = None) -> Dict:
        """Load trained models, preferring the database over local pickles.

        Args:
            version: Specific model version to load; when None the latest
                available version is used.

        Returns:
            Dict mapping model name to model object; empty dict when nothing
            could be loaded.
        """
        # Try the database first when it is configured.
        if self.use_database and self.db:
            try:
                models = {}
                # Keep in sync with the names produced by training (this list
                # previously omitted 'lstm' and 'ensemble_detector').
                model_names = ['isolation_forest', 'one_class_svm', 'autoencoder',
                               'enhanced_isolation_forest', 'advanced_autoencoder',
                               'lstm', 'ensemble_detector']

                for model_name in model_names:
                    model = self.db.load_model(model_name, version)
                    if model:
                        models[model_name] = model

                if models:
                    print(f"从数据库加载了 {len(models)} 个模型")
                    return models
            except Exception as e:
                print(f"从数据库加载模型失败: {e}")

        # Fall back to local pickle files.
        if not os.path.exists(self.model_dir):
            return {}

        model_files = [f for f in os.listdir(self.model_dir) if f.startswith('models_v')]
        if not model_files:
            return {}

        # No explicit version requested: pick the newest. Version tags are
        # timestamps (YYYYmmdd_HHMMSS), so lexicographic max is chronological.
        if version is None:
            versions = [f.replace('models_v', '').replace('.pkl', '') for f in model_files]
            version = max(versions) if versions else None

        if version is None:
            return {}

        model_path = os.path.join(self.model_dir, f"models_v{version}.pkl")
        if not os.path.exists(model_path):
            return {}

        try:
            # NOTE(review): pickle.load executes arbitrary code; only load
            # files this process wrote itself.
            with open(model_path, 'rb') as f:
                models = pickle.load(f)
            print(f"从本地文件加载了 {len(models)} 个模型，版本: {version}")
            return models
        except Exception as e:
            print(f"从本地文件加载模型失败: {e}")
            return {}

    def get_model_info(self) -> Dict:
        """Report which trained models are available and where they live.

        Returns:
            A status dict. When the database holds models it is preferred;
            otherwise local metadata files under ``self.model_dir`` are
            scanned. When neither has anything, a ``no_models`` status is
            returned.
        """
        # Database takes precedence when configured and populated.
        if self.use_database and self.db:
            db_models = self.db.get_available_models()
            if db_models:
                return {
                    'status': 'models_available',
                    'source': 'database',
                    'models': db_models,
                    'latest_version': max(m['model_version'] for m in db_models),
                    'training_data_count': len(self.training_data)
                }

        no_models = {'status': 'no_models', 'source': 'local'}

        # Local fallback: look for pickled bundles on disk.
        if not os.path.exists(self.model_dir):
            return no_models

        pkl_names = [name for name in os.listdir(self.model_dir) if name.startswith('models_v')]
        if not pkl_names:
            return no_models

        # Collect the metadata JSON written next to each bundle (skip bundles
        # whose info file is missing).
        version_infos = []
        for name in pkl_names:
            tag = name.replace('models_v', '').replace('.pkl', '')
            info_path = os.path.join(self.model_dir, f"model_info_v{tag}.json")
            if not os.path.exists(info_path):
                continue
            with open(info_path, 'r', encoding='utf-8') as info_file:
                version_infos.append(json.load(info_file))

        return {
            'status': 'models_available',
            'source': 'local',
            'versions': version_infos,
            'latest_version': max((v['version'] for v in version_infos), default=None),
            'training_data_count': len(self.training_data)
        }

    def _generate_mock_metrics(self, intervention_type: Optional[str] = None) -> Dict:
        """Generate one sample of mock system metrics.

        Args:
            intervention_type: One of 'cpu_stress', 'memory_stress',
                'disk_stress', 'network_stress', or None for a normal
                baseline sample.

        Returns:
            Nested dict mimicking real collector output: cpu/memory/disk/
            network sections, per-service stats (mysql/redis/nginx/apache),
            an 'npu' availability flag and error/warning/critical log
            counters. All values are drawn uniformly from ranges chosen to
            resemble the requested fault scenario.
        """
        if intervention_type == 'cpu_stress':
            # CPU stress: simulate high load (high usage/iowait, low idle).
            return {
                'cpu': {
                    'usage': np.random.uniform(85, 98),
                    'user': np.random.uniform(30, 50),
                    'system': np.random.uniform(20, 40),
                    'idle': np.random.uniform(2, 15),
                    'iowait': np.random.uniform(5, 15),
                    'irq': np.random.uniform(0, 5),
                    'softirq': np.random.uniform(0, 5),
                    'steal': np.random.uniform(0, 2),
                    'guest': np.random.uniform(0, 1)
                },
                'memory': {
                    'usage': np.random.uniform(70, 90),
                    'available': np.random.uniform(500, 2000),
                    'used': np.random.uniform(14000, 18000),
                    'free': np.random.uniform(500, 2000),
                    'cached': np.random.uniform(1000, 3000),
                    'buffers': np.random.uniform(500, 1500),
                    'swap_used': np.random.uniform(0, 1000)
                },
                'disk': {
                    'usage': np.random.uniform(60, 85),
                    'read_bytes': np.random.uniform(15000, 30000),
                    'write_bytes': np.random.uniform(15000, 30000),
                    'read_count': np.random.uniform(100, 300),
                    'write_count': np.random.uniform(100, 300),
                    'read_time': np.random.uniform(50, 150),
                    'write_time': np.random.uniform(50, 150)
                },
                'network': {
                    'sent_rate': np.random.uniform(15000, 30000),
                    'recv_rate': np.random.uniform(15000, 30000),
                    'packets_sent': np.random.uniform(1500, 3000),
                    'packets_recv': np.random.uniform(1500, 3000),
                    'errin': 0, 'errout': 0, 'dropin': 0, 'dropout': 0
                },
                'services': {
                    'mysql': {
                        'connections': np.random.uniform(80, 150),
                        'slow_queries': np.random.uniform(5, 20),
                        'threads_connected': np.random.uniform(40, 80),
                        'threads_running': np.random.uniform(10, 30),
                        'uptime': np.random.uniform(1000, 10000),
                        'questions': np.random.uniform(1000, 5000),
                        'opens': np.random.uniform(50, 200),
                        'flush_commands': np.random.uniform(10, 50),
                        'running': True
                    },
                    'redis': {
                        'connected_clients': np.random.uniform(40, 80),
                        'used_memory': np.random.uniform(8000, 15000),
                        'total_commands_processed': np.random.uniform(15000, 30000),
                        'keyspace_hits': np.random.uniform(200, 500),
                        'keyspace_misses': np.random.uniform(10, 50),
                        'evicted_keys': np.random.uniform(0, 10),
                        'expired_keys': np.random.uniform(5, 20),
                        'running': True
                    },
                    'nginx': {
                        'active_connections': np.random.uniform(80, 150),
                        '5xx_errors': np.random.uniform(2, 10),
                        'requests_per_second': np.random.uniform(80, 150),
                        '4xx_errors': np.random.uniform(5, 20),
                        'total_requests': np.random.uniform(10000, 50000),
                        'running': True
                    },
                    'apache': {
                        '5xx_errors': np.random.uniform(2, 10),
                        'requests_per_second': np.random.uniform(80, 150),
                        'running': True
                    }
                },
                'npu': {'available': False},
                'error_log_count': np.random.randint(0, 5),
                'warning_log_count': np.random.randint(5, 15),
                'critical_log_count': np.random.randint(0, 2)
            }
        elif intervention_type == 'memory_stress':
            # Memory stress: simulate a memory leak (high usage, swap growth).
            return {
                'cpu': {
                    'usage': np.random.uniform(60, 85),
                    'user': np.random.uniform(20, 40),
                    'system': np.random.uniform(15, 30),
                    'idle': np.random.uniform(15, 40),
                    'iowait': np.random.uniform(3, 10),
                    'irq': np.random.uniform(0, 3),
                    'softirq': np.random.uniform(0, 3),
                    'steal': np.random.uniform(0, 1),
                    'guest': np.random.uniform(0, 1)
                },
                'memory': {
                    'usage': np.random.uniform(85, 98),
                    'available': np.random.uniform(100, 800),
                    'used': np.random.uniform(16000, 19000),
                    'free': np.random.uniform(100, 800),
                    'cached': np.random.uniform(500, 1500),
                    'buffers': np.random.uniform(200, 800),
                    'swap_used': np.random.uniform(1000, 5000)
                },
                'disk': {
                    'usage': np.random.uniform(40, 70),
                    'read_bytes': np.random.uniform(8000, 20000),
                    'write_bytes': np.random.uniform(8000, 20000),
                    'read_count': np.random.uniform(50, 200),
                    'write_count': np.random.uniform(50, 200),
                    'read_time': np.random.uniform(30, 100),
                    'write_time': np.random.uniform(30, 100)
                },
                'network': {
                    'sent_rate': np.random.uniform(8000, 20000),
                    'recv_rate': np.random.uniform(8000, 20000),
                    'packets_sent': np.random.uniform(800, 2000),
                    'packets_recv': np.random.uniform(800, 2000),
                    'errin': 0, 'errout': 0, 'dropin': 0, 'dropout': 0
                },
                'services': {
                    'mysql': {
                        'connections': np.random.uniform(60, 120),
                        'slow_queries': np.random.uniform(3, 15),
                        'threads_connected': np.random.uniform(30, 70),
                        'threads_running': np.random.uniform(8, 25),
                        'uptime': np.random.uniform(1000, 10000),
                        'questions': np.random.uniform(800, 4000),
                        'opens': np.random.uniform(40, 150),
                        'flush_commands': np.random.uniform(8, 40),
                        'running': True
                    },
                    'redis': {
                        'connected_clients': np.random.uniform(30, 70),
                        'used_memory': np.random.uniform(12000, 18000),
                        'total_commands_processed': np.random.uniform(12000, 25000),
                        'keyspace_hits': np.random.uniform(150, 400),
                        'keyspace_misses': np.random.uniform(8, 40),
                        'evicted_keys': np.random.uniform(0, 8),
                        'expired_keys': np.random.uniform(3, 15),
                        'running': True
                    },
                    'nginx': {
                        'active_connections': np.random.uniform(60, 120),
                        '5xx_errors': np.random.uniform(1, 8),
                        'requests_per_second': np.random.uniform(60, 120),
                        '4xx_errors': np.random.uniform(3, 15),
                        'total_requests': np.random.uniform(8000, 40000),
                        'running': True
                    },
                    'apache': {
                        '5xx_errors': np.random.uniform(1, 8),
                        'requests_per_second': np.random.uniform(60, 120),
                        'running': True
                    }
                },
                'npu': {'available': False},
                'error_log_count': np.random.randint(0, 3),
                'warning_log_count': np.random.randint(3, 10),
                'critical_log_count': np.random.randint(0, 1)
            }
        elif intervention_type == 'disk_stress':
            # Disk stress: simulate an IO bottleneck (high disk usage/latency,
            # elevated CPU iowait).
            return {
                'cpu': {
                    'usage': np.random.uniform(50, 75),
                    'user': np.random.uniform(15, 35),
                    'system': np.random.uniform(10, 25),
                    'idle': np.random.uniform(25, 50),
                    'iowait': np.random.uniform(10, 25),
                    'irq': np.random.uniform(0, 2),
                    'softirq': np.random.uniform(0, 2),
                    'steal': np.random.uniform(0, 1),
                    'guest': np.random.uniform(0, 1)
                },
                'memory': {
                    'usage': np.random.uniform(30, 60),
                    'available': np.random.uniform(2000, 6000),
                    'used': np.random.uniform(8000, 12000),
                    'free': np.random.uniform(2000, 6000),
                    'cached': np.random.uniform(800, 2000),
                    'buffers': np.random.uniform(400, 1200),
                    'swap_used': np.random.uniform(0, 500)
                },
                'disk': {
                    'usage': np.random.uniform(85, 98),
                    'read_bytes': np.random.uniform(25000, 50000),
                    'write_bytes': np.random.uniform(25000, 50000),
                    'read_count': np.random.uniform(200, 500),
                    'write_count': np.random.uniform(200, 500),
                    'read_time': np.random.uniform(100, 300),
                    'write_time': np.random.uniform(100, 300)
                },
                'network': {
                    'sent_rate': np.random.uniform(5000, 15000),
                    'recv_rate': np.random.uniform(5000, 15000),
                    'packets_sent': np.random.uniform(500, 1500),
                    'packets_recv': np.random.uniform(500, 1500),
                    'errin': 0, 'errout': 0, 'dropin': 0, 'dropout': 0
                },
                'services': {
                    'mysql': {
                        'connections': np.random.uniform(40, 100),
                        'slow_queries': np.random.uniform(8, 25),
                        'threads_connected': np.random.uniform(20, 60),
                        'threads_running': np.random.uniform(5, 20),
                        'uptime': np.random.uniform(1000, 10000),
                        'questions': np.random.uniform(600, 3000),
                        'opens': np.random.uniform(30, 120),
                        'flush_commands': np.random.uniform(5, 30),
                        'running': True
                    },
                    'redis': {
                        'connected_clients': np.random.uniform(20, 60),
                        'used_memory': np.random.uniform(6000, 12000),
                        'total_commands_processed': np.random.uniform(8000, 20000),
                        'keyspace_hits': np.random.uniform(100, 300),
                        'keyspace_misses': np.random.uniform(5, 30),
                        'evicted_keys': np.random.uniform(0, 5),
                        'expired_keys': np.random.uniform(2, 10),
                        'running': True
                    },
                    'nginx': {
                        'active_connections': np.random.uniform(40, 100),
                        '5xx_errors': np.random.uniform(3, 12),
                        'requests_per_second': np.random.uniform(40, 100),
                        '4xx_errors': np.random.uniform(2, 10),
                        'total_requests': np.random.uniform(6000, 30000),
                        'running': True
                    },
                    'apache': {
                        '5xx_errors': np.random.uniform(3, 12),
                        'requests_per_second': np.random.uniform(40, 100),
                        'running': True
                    }
                },
                'npu': {'available': False},
                'error_log_count': np.random.randint(0, 2),
                'warning_log_count': np.random.randint(2, 8),
                'critical_log_count': np.random.randint(0, 1)
            }
        elif intervention_type == 'network_stress':
            # Network anomaly: simulate latency/packet loss (traffic spikes,
            # elevated 5xx errors).
            return {
                'cpu': {
                    'usage': np.random.uniform(40, 65),
                    'user': np.random.uniform(10, 30),
                    'system': np.random.uniform(8, 20),
                    'idle': np.random.uniform(35, 60),
                    'iowait': np.random.uniform(2, 8),
                    'irq': np.random.uniform(0, 1),
                    'softirq': np.random.uniform(0, 1),
                    'steal': np.random.uniform(0, 1),
                    'guest': np.random.uniform(0, 1)
                },
                'memory': {
                    'usage': np.random.uniform(25, 55),
                    'available': np.random.uniform(3000, 7000),
                    'used': np.random.uniform(6000, 11000),
                    'free': np.random.uniform(3000, 7000),
                    'cached': np.random.uniform(600, 1800),
                    'buffers': np.random.uniform(300, 900),
                    'swap_used': np.random.uniform(0, 300)
                },
                'disk': {
                    'usage': np.random.uniform(35, 65),
                    'read_bytes': np.random.uniform(3000, 12000),
                    'write_bytes': np.random.uniform(3000, 12000),
                    'read_count': np.random.uniform(30, 120),
                    'write_count': np.random.uniform(30, 120),
                    'read_time': np.random.uniform(20, 80),
                    'write_time': np.random.uniform(20, 80)
                },
                'network': {
                    'sent_rate': np.random.uniform(30000, 60000),
                    'recv_rate': np.random.uniform(30000, 60000),
                    'packets_sent': np.random.uniform(3000, 6000),
                    'packets_recv': np.random.uniform(3000, 6000),
                    'errin': 0, 'errout': 0, 'dropin': 0, 'dropout': 0
                },
                'services': {
                    'mysql': {
                        'connections': np.random.uniform(50, 110),
                        'slow_queries': np.random.uniform(2, 12),
                        'threads_connected': np.random.uniform(25, 65),
                        'threads_running': np.random.uniform(6, 18),
                        'uptime': np.random.uniform(1000, 10000),
                        'questions': np.random.uniform(700, 3500),
                        'opens': np.random.uniform(35, 130),
                        'flush_commands': np.random.uniform(6, 35),
                        'running': True
                    },
                    'redis': {
                        'connected_clients': np.random.uniform(25, 65),
                        'used_memory': np.random.uniform(5000, 11000),
                        'total_commands_processed': np.random.uniform(10000, 22000),
                        'keyspace_hits': np.random.uniform(120, 350),
                        'keyspace_misses': np.random.uniform(6, 35),
                        'evicted_keys': np.random.uniform(0, 3),
                        'expired_keys': np.random.uniform(1, 8),
                        'running': True
                    },
                    'nginx': {
                        'active_connections': np.random.uniform(50, 110),
                        '5xx_errors': np.random.uniform(4, 15),
                        'requests_per_second': np.random.uniform(50, 110),
                        '4xx_errors': np.random.uniform(3, 12),
                        'total_requests': np.random.uniform(7000, 35000),
                        'running': True
                    },
                    'apache': {
                        '5xx_errors': np.random.uniform(4, 15),
                        'requests_per_second': np.random.uniform(50, 110),
                        'running': True
                    }
                },
                'npu': {'available': False},
                'error_log_count': np.random.randint(0, 1),
                'warning_log_count': np.random.randint(1, 5),
                'critical_log_count': np.random.randint(0, 1)
            }
        else:
            # Normal baseline metrics (no intervention / unknown type).
            return {
                'cpu': {
                    'usage': np.random.uniform(10, 90),
                    'user': np.random.uniform(5, 45),
                    'system': np.random.uniform(3, 30),
                    'idle': np.random.uniform(10, 85),
                    'iowait': np.random.uniform(0, 10),
                    'irq': np.random.uniform(0, 3),
                    'softirq': np.random.uniform(0, 3),
                    'steal': np.random.uniform(0, 2),
                    'guest': np.random.uniform(0, 1)
                },
                'memory': {
                    'usage': np.random.uniform(20, 80),
                    'available': np.random.uniform(1000, 8000),
                    'used': np.random.uniform(2000, 16000),
                    'free': np.random.uniform(1000, 8000),
                    'cached': np.random.uniform(500, 4000),
                    'buffers': np.random.uniform(200, 2000),
                    'swap_used': np.random.uniform(0, 1000)
                },
                'disk': {
                    'usage': np.random.uniform(30, 70),
                    'read_bytes': np.random.uniform(1000, 10000),
                    'write_bytes': np.random.uniform(1000, 10000),
                    'read_count': np.random.uniform(10, 100),
                    'write_count': np.random.uniform(10, 100),
                    'read_time': np.random.uniform(10, 50),
                    'write_time': np.random.uniform(10, 50)
                },
                'network': {
                    'sent_rate': np.random.uniform(1000, 10000),
                    'recv_rate': np.random.uniform(1000, 10000),
                    'packets_sent': np.random.uniform(100, 1000),
                    'packets_recv': np.random.uniform(100, 1000),
                    'errin': 0, 'errout': 0, 'dropin': 0, 'dropout': 0
                },
                'services': {
                    'mysql': {
                        'connections': np.random.uniform(10, 100),
                        'slow_queries': np.random.uniform(0, 10),
                        'threads_connected': np.random.uniform(5, 50),
                        'threads_running': np.random.uniform(2, 15),
                        'uptime': np.random.uniform(1000, 10000),
                        'questions': np.random.uniform(500, 2500),
                        'opens': np.random.uniform(20, 100),
                        'flush_commands': np.random.uniform(3, 25),
                        'running': True
                    },
                    'redis': {
                        'connected_clients': np.random.uniform(5, 50),
                        'used_memory': np.random.uniform(1000, 10000),
                        'total_commands_processed': np.random.uniform(1000, 10000),
                        'keyspace_hits': np.random.uniform(100, 1000),
                        'keyspace_misses': np.random.uniform(3, 25),
                        'evicted_keys': np.random.uniform(0, 2),
                        'expired_keys': np.random.uniform(1, 5),
                        'running': True
                    },
                    'nginx': {
                        'active_connections': np.random.uniform(10, 100),
                        '5xx_errors': np.random.uniform(0, 5),
                        'requests_per_second': np.random.uniform(10, 100),
                        '4xx_errors': np.random.uniform(1, 8),
                        'total_requests': np.random.uniform(5000, 25000),
                        'running': True
                    },
                    'apache': {
                        '5xx_errors': np.random.uniform(0, 5),
                        'requests_per_second': np.random.uniform(10, 100),
                        'running': True
                    }
                },
                'npu': {'available': False},
                'error_log_count': np.random.randint(0, 2),
                'warning_log_count': np.random.randint(1, 8),
                'critical_log_count': np.random.randint(0, 1)
            }


def _report_model_status(trainer) -> None:
    """Print the trainer's current model inventory (database- or file-backed)."""
    model_info = trainer.get_model_info()
    print(f"\n当前模型状态: {model_info}")

    if model_info['status'] == 'models_available':
        print(f"最新模型版本: {model_info['latest_version']}")
        print("可用模型:")

        # The info layout differs by storage backend.
        if model_info['source'] == 'database':
            # Database-backed models carry per-model training metadata.
            for model in model_info['models']:
                print(f"  - {model['model_name']} (版本: {model['model_version']})")
                print(f"    训练样本: {model['training_samples']}, 特征维度: {model['feature_dim']}")
        else:
            # Local-file models are grouped by version.
            for version_info in model_info['versions']:
                print(f"  - 版本 {version_info['version']}: {version_info['models']}")


def _report_data_status(trainer) -> None:
    """Print how many training samples the database already holds, if reachable."""
    if not (trainer.use_database and trainer.db):
        return
    try:
        db_data = trainer.db.get_training_data(hours=24)
        print(f"\n当前数据库中有 {len(db_data)} 个训练数据点")
        if len(db_data) >= 720:
            print("✅ 数据库中有足够的数据进行训练")
        else:
            print(f"⚠️  数据库中的数据不足，还需要 {720 - len(db_data)} 个数据点")
    except Exception as e:
        # Best-effort status report: a DB outage must not abort startup.
        print(f"\n⚠️  无法查询数据库状态: {e}")


def _collection_loop(trainer) -> None:
    """Collect mock metrics every 5 seconds, injecting a stress scenario every
    500 samples, and (re)train models whenever enough data is available.

    Runs forever; the caller is expected to stop it via KeyboardInterrupt.
    """
    intervention_counter = 0
    while True:
        intervention_counter += 1

        # Every 500th sample simulates an anomaly so the models see stress data.
        if intervention_counter % 500 == 0:
            intervention_type = np.random.choice(['cpu_stress', 'memory_stress', 'disk_stress', 'network_stress'])
            print(f"\n🔴 执行异常干预测试: {intervention_type}")
            mock_metrics = trainer._generate_mock_metrics(intervention_type)
        else:
            mock_metrics = trainer._generate_mock_metrics()

        trainer.collect_training_data(mock_metrics)

        if trainer.has_sufficient_data(min_samples=720):
            # One hour of 5-second samples (720) collected: train and keep going.
            print("1小时数据收集完成，开始训练模型...")
            try:
                trainer.train_models()
                print("模型训练完成！")
                print("继续收集数据以更新模型...")
            except Exception as e:
                print(f"模型训练失败: {e}")
        elif trainer.use_database and trainer.db:
            # Last hour alone is short — fall back to the 24-hour window.
            try:
                all_data = trainer.db.get_training_data(hours=24)
                if len(all_data) >= 720:
                    print(f"最近1小时数据不足，但24小时数据充足({len(all_data)}个)，开始训练模型...")
                    trainer.train_models(use_extended_data=True)
                    print("模型训练完成！")
                    print("继续收集数据以更新模型...")
            except Exception as e:
                print(f"使用24小时数据训练失败: {e}")

        time.sleep(5)  # sampling interval: one data point every 5 seconds


def main():
    """Entry point for the optimized model trainer.

    Prints usage and current model/data status, then collects mock training
    data indefinitely, training anomaly-detection models whenever enough
    samples exist. Ctrl+C stops collection; if at least half an hour of data
    (360 samples) was gathered, a final training pass is attempted.
    """
    print("=== Kylin 模型训练器（优化版）===")
    print("此文件专门用于训练和保存异常检测模型")
    print("使用方法:")
    print("1. 运行此文件开始收集训练数据（1小时）")
    print("2. 收集足够数据后自动训练模型")
    print("3. 训练好的模型将保存到 trained_models 目录")

    trainer = ModelTrainerOptimized()

    _report_model_status(trainer)
    _report_data_status(trainer)

    print("\n开始训练模式...")
    print("按 Ctrl+C 停止训练")
    print("数据收集过程中会自动进行异常干预测试：")
    print("- CPU加压：模拟高负载")
    print("- 内存加压：模拟内存泄漏")
    print("- 磁盘加压：模拟IO瓶颈")
    print("- 网络异常：模拟网络延迟")

    try:
        _collection_loop(trainer)
    except KeyboardInterrupt:
        print("\n训练已停止")

        # At least 0.5 hours of data collected — a final training pass is worthwhile.
        if trainer.has_sufficient_data(min_samples=360):
            print("尝试使用现有数据训练模型...")
            try:
                trainer.train_models()
                print("模型训练完成！")
            except Exception as e:
                print(f"模型训练失败: {e}")


# Script entry point: only run the trainer when executed directly, not on import.
if __name__ == "__main__":
    main()
