# ===================== 模块6 时间序列分析 ========================

# 假设这是MCP工具环境
@mcp.tool()
def time_series_stationarity_test(data: List[float], test_type: str = 'adf') -> Dict[str, Any]:
    """
    Stationarity test for a time series.

    Args:
        data: Time series observations.
        test_type: Test type ('adf' - Augmented Dickey-Fuller is the only
            supported value; anything else returns an error dict).

    Returns:
        Dict with the ADF statistic, p-value, critical values, the
        stationarity verdict and a base64-encoded diagnostic figure;
        or a dict with a single 'error' key on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        # Coerce to a float ndarray; non-numeric input raises and is
        # reported through the outer except handler.
        ts_data = np.array(data, dtype=float)
        
        # ADF needs a minimal sample to estimate its lag regression.
        if len(ts_data) < 10:
            return {"error": f"数据点不足({len(ts_data)})，至少需要10个点进行平稳性检验"}
        
        if test_type == 'adf':
            # Augmented Dickey-Fuller test; lag order selected by AIC.
            adf_result = adfuller(ts_data, autolag='AIC')
            statistic = float(adf_result[0])  # cast numpy scalar -> Python float (JSON-safe)
            p_value = float(adf_result[1])    # cast numpy scalar -> Python float (JSON-safe)
            critical_values = {
                '1%': float(adf_result[4]['1%']),
                '5%': float(adf_result[4]['5%']),
                '10%': float(adf_result[4]['10%'])
            }
            
            # Verdict: statistic more negative than the 5% critical value
            # rejects the unit-root null => stationary at 95% confidence.
            is_stationary = bool(statistic < critical_values['5%'])  # cast numpy bool -> Python bool (JSON-safe)
            
            # 2x2 diagnostic figure: raw series, rolling stats, first
            # difference, and a textual summary panel.
            fig, axes = plt.subplots(2, 2, figsize=(14, 10))
            plt.suptitle('时间序列平稳性分析', fontsize=16, fontweight='bold')
            
            # Panel (0,0): the raw series.
            axes[0, 0].plot(ts_data, linewidth=2, color='blue')
            axes[0, 0].set_title('原始时间序列')
            axes[0, 0].set_xlabel('时间点')
            axes[0, 0].set_ylabel('观测值')
            axes[0, 0].grid(True, linestyle='--', alpha=0.7)
            
            # Panel (0,1): rolling mean/std; window scales with series
            # length but stays within [3, 10].
            window = max(3, min(10, len(ts_data) // 3))
            rolling_mean = pd.Series(ts_data).rolling(window=window).mean()
            rolling_std = pd.Series(ts_data).rolling(window=window).std()
            
            axes[0, 1].plot(ts_data, label='原始数据', alpha=0.7, color='blue')
            axes[0, 1].plot(rolling_mean, label=f'滚动均值(窗口={window})', color='red', linewidth=2)
            axes[0, 1].plot(rolling_std, label=f'滚动标准差(窗口={window})', color='green', linewidth=2)
            axes[0, 1].set_title('滚动统计')
            axes[0, 1].legend(loc='best')
            axes[0, 1].grid(True, linestyle='--', alpha=0.7)
            
            # Panel (1,0): first difference; falls back to a notice when
            # the series is too short to difference meaningfully.
            diff_data = np.diff(ts_data)
            if len(diff_data) > 1:
                axes[1, 0].plot(diff_data, color='purple', linewidth=2)
                axes[1, 0].set_title('一阶差分')
                axes[1, 0].set_xlabel('时间点')
                axes[1, 0].set_ylabel('差分值')
                axes[1, 0].grid(True, linestyle='--', alpha=0.7)
            else:
                axes[1, 0].axis('off')
                axes[1, 0].text(0.5, 0.5, '数据不足\n无法计算差分', 
                                ha='center', va='center', fontsize=12)
            
            # Panel (1,1): text-only summary of the test outcome.
            axes[1, 1].axis('off')
            
            # Human-readable explanation of the ADF statistic itself.
            stat_explanation = (
                "ADF统计量衡量序列与单位根过程的偏差。\n"
                "负值越大，序列越可能是平稳的。\n"
                f"当前值: {statistic:.4f}"
            )
            
            # Human-readable explanation of the decision rule applied above.
            conclusion_explanation = (
                f"序列{'平稳' if is_stationary else '非平稳'}\n\n"
                f"原因: ADF统计量({statistic:.4f}) "
                f"{'<' if is_stationary else '>'} "
                f"5%临界值({critical_values['5%']:.4f})"
            )
            
            result_text = f"""
ADF平稳性检验结果:

检验统计量: {statistic:.6f}
P值: {p_value:.6f}

临界值:
1%: {critical_values['1%']:.6f} (99%置信度平稳)
5%: {critical_values['5%']:.6f} (95%置信度平稳) → 决策边界
10%: {critical_values['10%']:.6f} (90%置信度平稳)

{conclusion_explanation}

统计量解释:
{stat_explanation}
            """
            
            # Summary panel background: green = stationary, red = not.
            bg_color = "lightgreen" if is_stationary else "lightcoral"
            axes[1, 1].text(0.1, 0.5, result_text, fontsize=11, 
                    verticalalignment='center', linespacing=1.5,
                    bbox=dict(boxstyle="round,pad=0.8", facecolor=bg_color, alpha=0.8))
            
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            
            # Encode the finished figure as base64 for transport.
            plot_base64 = create_plot_base64(fig)
            
            return {
                'test_type': 'ADF',
                'statistic': statistic,
                'p_value': p_value,
                'critical_values': critical_values,
                'is_stationary': is_stationary,
                'conclusion': '平稳' if is_stationary else '非平稳',
                'decision_rule': f"ADF统计量 < {critical_values['5%']:.4f} (5%临界值)",
                'visualization': plot_base64
            }
        
        else:
            return {"error": f"不支持的检验类型: {test_type}"}
            
    except Exception as e:
        return {"error": f"平稳性检验错误: {str(e)}"}



@mcp.tool()
def time_series_decomposition(data: List[float], model: str = 'additive', period: int = 12) -> Dict[str, Any]:
    """
    Decompose a time series into trend, seasonal and residual components.

    Args:
        data: Time series observations.
        model: Decomposition model ('additive' or 'multiplicative').
            A multiplicative model requires strictly positive data.
        period: Seasonal cycle length (must be >= 2).

    Returns:
        Dict with the component series (NaN edges dropped from trend and
        residual), per-component statistics and a base64-encoded figure;
        or a dict with an 'error' key on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        # Fix: a seasonal period below 2 is meaningless and would make
        # seasonal_decompose fail with a cryptic message.
        if period < 2:
            return {"error": f"季节性周期({period})必须至少为2"}
        
        if len(data) < 2 * period:
            return {"error": f"数据长度({len(data)})必须至少是周期({period})的2倍"}
        
        # Fix: the multiplicative model divides by the seasonal/trend
        # components, so zero or negative observations are invalid.
        if model == 'multiplicative' and any(x <= 0 for x in data):
            return {"error": "乘法模型要求所有数据值严格为正"}
        
        ts_data = pd.Series(data)
        
        # Classical seasonal decomposition (moving-average based).
        decomposition = seasonal_decompose(ts_data, model=model, period=period)
        
        # Four stacked panels: observed, trend, seasonal, residual.
        fig, axes = plt.subplots(4, 1, figsize=(12, 10))
        
        # Observed series.
        axes[0].plot(decomposition.observed, linewidth=2, color='blue')
        axes[0].set_title('原始时间序列')
        axes[0].set_ylabel('观测值')
        axes[0].grid(True, alpha=0.3)
        
        # Trend component (NaN at both edges due to the centered MA).
        axes[1].plot(decomposition.trend, linewidth=2, color='red')
        axes[1].set_title('趋势成分')
        axes[1].set_ylabel('趋势')
        axes[1].grid(True, alpha=0.3)
        
        # Seasonal component.
        axes[2].plot(decomposition.seasonal, linewidth=2, color='green')
        axes[2].set_title('季节性成分')
        axes[2].set_ylabel('季节性')
        axes[2].grid(True, alpha=0.3)
        
        # Residual component.
        axes[3].plot(decomposition.resid, linewidth=2, color='orange')
        axes[3].set_title('残差成分')
        axes[3].set_xlabel('时间')
        axes[3].set_ylabel('残差')
        axes[3].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Component statistics; nan-aware reducers skip the NaN edges of
        # the trend and residual series.
        trend_stats = {
            'mean': float(np.nanmean(decomposition.trend)),
            'std': float(np.nanstd(decomposition.trend)),
            'min': float(np.nanmin(decomposition.trend)),
            'max': float(np.nanmax(decomposition.trend))
        }
        
        seasonal_stats = {
            'mean': float(np.mean(decomposition.seasonal)),
            'std': float(np.std(decomposition.seasonal)),
            'amplitude': float(np.max(decomposition.seasonal) - np.min(decomposition.seasonal))
        }
        
        resid_stats = {
            'mean': float(np.nanmean(decomposition.resid)),
            'std': float(np.nanstd(decomposition.resid)),
            'variance': float(np.nanvar(decomposition.resid))
        }
        
        return {
            'model': model,
            'period': period,
            'trend': decomposition.trend[~np.isnan(decomposition.trend)].tolist(),
            'seasonal': decomposition.seasonal.tolist(),
            'residual': decomposition.resid[~np.isnan(decomposition.resid)].tolist(),
            'trend_stats': trend_stats,
            'seasonal_stats': seasonal_stats,
            'residual_stats': resid_stats,
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"时间序列分解错误: {str(e)}"}

@mcp.tool()
def ar_model_analysis(data: List[float], lags: int = 5, method: str = 'ols') -> Dict[str, Any]:
    """
    AR (autoregressive) model analysis.

    Args:
        data: Time series observations.
        lags: Autoregressive lag order.
        method: Accepted for backward compatibility but currently ignored —
            statsmodels' AutoReg always estimates by conditional MLE/OLS.
            NOTE(review): confirm no caller expects 'mle' to change behavior.

    Returns:
        Dict with coefficients, fitted values, residuals, an out-of-sample
        forecast, model statistics and a base64-encoded figure; or a dict
        with an 'error' key on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        if len(data) <= lags + 1:
            return {"error": f"数据长度({len(data)})必须大于滞后阶数({lags}) + 1"}
        
        ts_data = np.array(data)
        
        # Fit AR(lags) with a constant term.
        ar_model = AutoReg(ts_data, lags=lags, trend='c')
        ar_fitted = ar_model.fit()
        
        # Fix: clamp to at least one step — len(data) // 4 is 0 for very
        # short series (e.g. lags=1, len=3 passes the guard above) and
        # forecast(steps=0) raises.
        forecast_steps = max(1, min(10, len(data) // 4))
        forecast = ar_fitted.forecast(steps=forecast_steps)
        
        # In-sample fit; fittedvalues/resid start after the first `lags`
        # observations.
        fitted_values = ar_fitted.fittedvalues
        residuals = ar_fitted.resid
        
        # 2x2 diagnostic figure.
        fig, axes = plt.subplots(2, 2, figsize=(12, 8))
        
        # Panel (0,0): data, in-sample fit, and forecast.
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8)
        axes[0, 0].plot(range(lags, len(ts_data)), fitted_values, 
                       label='AR拟合值', linewidth=2, alpha=0.8)
        
        # Forecast plotted on the index range following the sample.
        forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
        axes[0, 0].plot(forecast_index, forecast, 
                       label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--')
        
        axes[0, 0].set_title(f'AR({lags})模型拟合和预测')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Panel (0,1): residual series around zero.
        axes[0, 1].plot(residuals, linewidth=2, color='red')
        axes[0, 1].axhline(y=0, color='black', linestyle='--', alpha=0.5)
        axes[0, 1].set_title('残差序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('残差')
        axes[0, 1].grid(True, alpha=0.3)
        
        # Panels (1,0)/(1,1): residual ACF/PACF with ±1.96/sqrt(n)
        # significance bands; skipped when residuals are too few.
        if len(residuals) > 10:
            acf_values = acf(residuals, nlags=min(20, len(residuals)//4), fft=False)
            pacf_values = pacf(residuals, nlags=min(20, len(residuals)//4))
            
            axes[1, 0].stem(range(len(acf_values)), acf_values, basefmt=" ")
            axes[1, 0].axhline(y=0, color='black', linestyle='-', alpha=0.5)
            axes[1, 0].axhline(y=1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 0].axhline(y=-1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 0].set_title('残差ACF')
            axes[1, 0].set_xlabel('滞后')
            axes[1, 0].set_ylabel('ACF')
            axes[1, 0].grid(True, alpha=0.3)
            
            axes[1, 1].stem(range(len(pacf_values)), pacf_values, basefmt=" ")
            axes[1, 1].axhline(y=0, color='black', linestyle='-', alpha=0.5)
            axes[1, 1].axhline(y=1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 1].axhline(y=-1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 1].set_title('残差PACF')
            axes[1, 1].set_xlabel('滞后')
            axes[1, 1].set_ylabel('PACF')
            axes[1, 1].grid(True, alpha=0.3)
        else:
            axes[1, 0].text(0.5, 0.5, '数据不足\n无法计算ACF', ha='center', va='center')
            axes[1, 1].text(0.5, 0.5, '数据不足\n无法计算PACF', ha='center', va='center')
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Information criteria and goodness-of-fit summary.
        model_stats = {
            'aic': float(ar_fitted.aic),
            'bic': float(ar_fitted.bic),
            'llf': float(ar_fitted.llf),
            'rsquared': float(ar_fitted.rsquared) if hasattr(ar_fitted, 'rsquared') else None,
            'mse': float(np.mean(residuals**2))
        }
        
        return {
            'model_type': f'AR({lags})',
            'lags': lags,
            'coefficients': ar_fitted.params.tolist(),
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast.tolist(),
            'model_stats': model_stats,
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"AR模型分析错误: {str(e)}"}

@mcp.tool()
def arima_model_analysis(data: List[float], order: Tuple[int, int, int] = (1, 1, 1), 
                        seasonal_order: Optional[Tuple[int, int, int, int]] = None) -> Dict[str, Any]:
    """
    ARIMA model analysis.

    Args:
        data: Time series observations.
        order: ARIMA order (p, d, q).
        seasonal_order: Optional seasonal order (P, D, Q, s).

    Returns:
        Dict with coefficients, fitted values, residuals, forecast with
        confidence bounds, model statistics and a base64-encoded figure;
        or a dict with an 'error' key on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        p, d, q = order
        # Require enough observations to identify p+d+q parameters
        # (floor of 10 keeps tiny samples out).
        min_length = max(p + d + q + 1, 10)
        
        if len(data) < min_length:
            return {"error": f"数据长度({len(data)})不足，至少需要{min_length}个观测值"}
        
        ts_data = np.array(data)
        
        # Fit ARIMA (seasonal variant when a seasonal order is given).
        if seasonal_order:
            arima_model = ARIMA(ts_data, order=order, seasonal_order=seasonal_order)
        else:
            arima_model = ARIMA(ts_data, order=order)
        
        arima_fitted = arima_model.fit()
        
        # Fix: one get_forecast() call yields both the point forecast and
        # its confidence interval — the original ran the forecast twice.
        forecast_steps = min(10, len(data) // 4)
        forecast_obj = arima_fitted.get_forecast(steps=forecast_steps)
        forecast_result = forecast_obj.predicted_mean
        forecast_ci = forecast_obj.conf_int()
        
        # In-sample fit and residuals.
        fitted_values = arima_fitted.fittedvalues
        residuals = arima_fitted.resid
        
        # 2x2 diagnostic figure.
        fig, axes = plt.subplots(2, 2, figsize=(12, 8))
        
        # Panel (0,0): data, fit, forecast with confidence band.
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8)
        axes[0, 0].plot(fitted_values, label='ARIMA拟合值', linewidth=2, alpha=0.8)
        
        forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
        axes[0, 0].plot(forecast_index, forecast_result, 
                       label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--')
        
        if len(forecast_ci) > 0:
            axes[0, 0].fill_between(forecast_index, 
                                   forecast_ci[:, 0], forecast_ci[:, 1],
                                   alpha=0.3, label='95%置信区间')
        
        model_name = f'ARIMA{order}'
        if seasonal_order:
            model_name += f'×{seasonal_order}'
        
        axes[0, 0].set_title(f'{model_name}模型拟合和预测')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Panel (0,1): residual series around zero.
        axes[0, 1].plot(residuals, linewidth=2, color='red')
        axes[0, 1].axhline(y=0, color='black', linestyle='--', alpha=0.5)
        axes[0, 1].set_title('残差序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('残差')
        axes[0, 1].grid(True, alpha=0.3)
        
        # Panel (1,0): residual normality via Q-Q plot.
        from scipy import stats
        stats.probplot(residuals, dist="norm", plot=axes[1, 0])
        axes[1, 0].set_title('残差Q-Q图')
        axes[1, 0].grid(True, alpha=0.3)
        
        # Panel (1,1): text-only diagnostics panel.
        axes[1, 1].axis('off')
        
        # Ljung-Box test for residual autocorrelation; best-effort — the
        # summary simply omits the p-value when the test is unavailable.
        try:
            from statsmodels.stats.diagnostic import acorr_ljungbox
            lb_test = acorr_ljungbox(residuals, lags=min(10, len(residuals)//4), return_df=True)
            lb_pvalue = lb_test['lb_pvalue'].iloc[-1]
        except Exception:  # fix: bare except also caught SystemExit/KeyboardInterrupt
            lb_pvalue = None
        
        result_text = f"""
{model_name}模型诊断:

模型信息:
AIC: {arima_fitted.aic:.4f}
BIC: {arima_fitted.bic:.4f}
对数似然: {arima_fitted.llf:.4f}

残差统计:
均值: {np.mean(residuals):.6f}
标准差: {np.std(residuals):.6f}
偏度: {stats.skew(residuals):.4f}
峰度: {stats.kurtosis(residuals):.4f}

{f'Ljung-Box检验 p值: {lb_pvalue:.4f}' if lb_pvalue is not None else ''}
        """
        
        axes[1, 1].text(0.1, 0.5, result_text, fontsize=10, verticalalignment='center',
                bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Information criteria and error metrics.
        model_stats = {
            'aic': float(arima_fitted.aic),
            'bic': float(arima_fitted.bic),
            'llf': float(arima_fitted.llf),
            'mse': float(np.mean(residuals**2)),
            'mae': float(np.mean(np.abs(residuals))),
            'ljung_box_pvalue': float(lb_pvalue) if lb_pvalue is not None else None
        }
        
        return {
            'model_type': model_name,
            'order': order,
            'seasonal_order': seasonal_order,
            'coefficients': arima_fitted.params.tolist(),
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast_result.tolist(),
            'forecast_ci_lower': forecast_ci[:, 0].tolist() if len(forecast_ci) > 0 else [],
            'forecast_ci_upper': forecast_ci[:, 1].tolist() if len(forecast_ci) > 0 else [],
            'model_stats': model_stats,
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"ARIMA模型分析错误: {str(e)}"}

@mcp.tool()
def time_series_auto_arima(data: List[float], max_p: int = 3, max_d: int = 2, max_q: int = 3, 
                           seasonal: bool = False, m: int = 12, timeout: int = 60) -> Dict[str, Any]:
    """
    智能自动ARIMA模型选择 - 增强版
    
    Args:
        data: 时间序列数据
        max_p: 最大AR阶数 (默认3，平衡性能与准确性)
        max_d: 最大差分阶数 (默认2，通常足够)
        max_q: 最大MA阶数 (默认3，平衡性能与准确性)
        seasonal: 是否考虑季节性
        m: 季节性周期
        timeout: 超时时间(秒)
    
    Returns:
        最优ARIMA模型结果和详细分析
    """
    import time
    import warnings
    from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
    
    def detect_periodicity(ts_data):
        """Detect the dominant period via FFT, fall back to autocorrelation,
        and finally to the caller-supplied default period `m` (closure)."""
        try:
            from scipy.fft import fft
            from scipy.signal import find_peaks
            
            # Frequency-domain analysis of the demeaned series.
            fft_vals = np.abs(fft(ts_data - np.mean(ts_data)))
            freqs = np.fft.fftfreq(len(ts_data))
            
            # Peaks above 10% of the maximum magnitude, positive-frequency
            # half only.
            peaks, _ = find_peaks(fft_vals[:len(fft_vals)//2], height=np.max(fft_vals)*0.1)
            
            if len(peaks) > 0:
                dominant_freq = freqs[peaks[np.argmax(fft_vals[peaks])]]
                if dominant_freq > 0:
                    period = int(1 / dominant_freq)
                    # Only accept a period the sample can support.
                    if 2 <= period <= len(ts_data) // 3:
                        return period
            
            # Fallback: normalized autocorrelation, first significant
            # positive local peak.
            autocorr = np.correlate(ts_data, ts_data, mode='full')
            autocorr = autocorr[len(autocorr)//2:]
            autocorr = autocorr / autocorr[0]
            
            for i in range(2, min(len(autocorr), len(ts_data)//3)):
                if autocorr[i] > 0.3 and autocorr[i] > autocorr[i-1] and autocorr[i] > autocorr[i+1]:
                    return i
                    
            return m  # default period from the enclosing scope
        except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
            return m
    
    def check_stationarity(ts_data):
        """Suggest a differencing order d via repeated ADF tests.

        Returns (d, message): d is 0/1/2; any failure falls back to
        (1, default message) rather than propagating.
        """
        try:
            # ADF on the raw series: p < 0.05 rejects the unit root.
            adf_result = adfuller(ts_data, autolag='AIC')
            is_stationary = adf_result[1] < 0.05
            
            if is_stationary:
                return 0, "序列已平稳"
            
            # Try first differencing.
            diff1 = np.diff(ts_data)
            if len(diff1) > 10:
                adf_result1 = adfuller(diff1, autolag='AIC')
                if adf_result1[1] < 0.05:
                    return 1, "一阶差分后平稳"
            
            # Try second differencing.
            if len(diff1) > 10:
                diff2 = np.diff(diff1)
                if len(diff2) > 10:
                    adf_result2 = adfuller(diff2, autolag='AIC')
                    if adf_result2[1] < 0.05:
                        return 2, "二阶差分后平稳"
            
            return 1, "建议一阶差分"
        except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
            return 1, "默认一阶差分"
    
    def fit_arima_model(order, seasonal_order, ts_data):
        """Fit one ARIMA candidate safely; return a result dict or None.

        Tries several optimizers in order, skipping any fit that fails,
        does not converge, or yields degenerate residuals. Returning None
        (never raising) lets the caller's model search continue.
        """
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                
                # Reject over-parameterized candidates up front.
                p, d, q = order
                if p + q > 8:  # cap total AR+MA terms to keep fits tractable
                    return None
                    
                if seasonal_order:
                    P, D, Q, s = seasonal_order
                    # Seasonal terms must be modest and the cycle must fit
                    # the sample at least ~3 times.
                    if P + Q > 6 or s > len(ts_data) // 3:
                        return None
                
                # Optimizers tried in order of preference; first clean fit wins.
                methods = ['lbfgs', 'bfgs', 'nm']
                
                for method in methods:
                    try:
                        if seasonal_order:
                            model = ARIMA(ts_data, order=order, seasonal_order=seasonal_order,
                                        enforce_stationarity=False, enforce_invertibility=False)
                        else:
                            model = ARIMA(ts_data, order=order,
                                        enforce_stationarity=False, enforce_invertibility=False)
                        
                        fitted = model.fit(method=method, maxiter=100, disp=False)
                        
                        # Skip fits the optimizer reports as non-converged.
                        if hasattr(fitted, 'mle_retvals') and not fitted.mle_retvals.get('converged', True):
                            continue
                            
                        # Skip fits with non-finite information criteria.
                        if not (np.isfinite(fitted.aic) and np.isfinite(fitted.bic)):
                            continue
                            
                        # Skip fits with NaN or constant residuals.
                        residuals = fitted.resid
                        if np.any(np.isnan(residuals)) or np.std(residuals) == 0:
                            continue
                            
                        return {
                            'order': order,
                            'seasonal_order': seasonal_order,
                            'aic': float(fitted.aic),
                            'bic': float(fitted.bic),
                            'llf': float(fitted.llf),
                            'model': fitted,
                            'method': method,
                            'residual_std': float(np.std(residuals))
                        }
                        
                    except Exception:
                        # This optimizer failed; try the next one.
                        continue
                        
                return None
                
        except Exception:
            return None
    
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        # 增强的数据验证
        if len(data) < 15:  # 降低最小要求，提高适用性
            return {"error": "数据长度不足，至少需要15个观测值进行自动模型选择"}
            
        if seasonal and len(data) < 2 * m:
            return {"error": f"季节性分析需要至少{2*m}个观测值"}
        
        ts_data = np.array(data, dtype=float)
        
        # 检查数据中是否有异常值
        if np.any(np.isnan(ts_data)) or np.any(np.isinf(ts_data)):
            return {"error": "数据包含NaN或无穷大值，请先清理数据"}
        
        # 智能平稳性检测
        suggested_d, stationarity_msg = check_stationarity(ts_data)
        
        # 智能周期性检测
        if seasonal:
            detected_period = detect_periodicity(ts_data)
            if detected_period != m and 2 <= detected_period <= len(ts_data) // 3:
                m = detected_period
        
        # 自适应调整搜索范围
        data_len = len(data)
        if data_len < 50:
            max_p = min(max_p, 2)
            max_q = min(max_q, 2)
            max_d = min(max_d, 1)
        elif data_len < 100:
            max_p = min(max_p, 3)
            max_q = min(max_q, 3)
        
        # 使用建议的差分阶数
        if suggested_d <= max_d:
            max_d = min(max_d, suggested_d + 1)
            
        start_time = time.time()
        best_aic = np.inf
        best_model_info = None
        results = []
        
        # 智能优先级配置（基于平稳性检测结果）
        priority_configs = [
            # 基于检测到的差分阶数的基础模型
            ((1, suggested_d, 1), None),
            ((0, suggested_d, 1), None),
            ((1, suggested_d, 0), None),
            ((2, suggested_d, 1), None),
            # 传统常用配置
            ((1, 1, 1), None),
            ((0, 1, 1), None),
            ((2, 1, 2), None),
        ]
        
        # 智能季节性配置
        if seasonal and len(data) > 2 * m:
            seasonal_configs = [
                ((1, suggested_d, 1), (1, 1, 1, m)),
                ((0, suggested_d, 1), (0, 1, 1, m)),
                ((1, suggested_d, 0), (1, 1, 0, m)),
                ((1, 1, 1), (1, 1, 1, m)),
                ((0, 1, 1), (0, 1, 1, m)),
            ]
            priority_configs.extend(seasonal_configs)
        
        # 去重并过滤有效配置
        valid_priority_configs = []
        seen_configs = set()
        for order, seasonal_order in priority_configs:
            p, d, q = order
            if (p <= max_p and d <= max_d and q <= max_q and 
                (order, seasonal_order) not in seen_configs):
                valid_priority_configs.append((order, seasonal_order))
                seen_configs.add((order, seasonal_order))
        
        # 首先尝试优先配置
        for order, seasonal_order in valid_priority_configs:
            if time.time() - start_time > timeout * 0.4:  # 40%时间用于优先配置
                break
                
            result = fit_arima_model(order, seasonal_order, ts_data)
            if result:
                results.append(result)
                if result['aic'] < best_aic:
                    best_aic = result['aic']
                    best_model_info = result
        
        # 智能全面搜索（如果时间允许）
        if time.time() - start_time < timeout * 0.7:
            search_configs = []
            searched_configs = set(valid_priority_configs)
            
            # 生成智能搜索配置
            # 1. 围绕建议差分阶数的配置
            for p in range(max_p + 1):
                for q in range(max_q + 1):
                    for d_offset in [-1, 0, 1]:  # 在建议差分阶数周围搜索
                        d = max(0, min(max_d, suggested_d + d_offset))
                        config = ((p, d, q), None)
                        if config not in searched_configs:
                            search_configs.append(config)
                            searched_configs.add(config)
            
            # 2. 季节性模型的智能搜索
            if seasonal and len(data) > 2 * m:
                for p in range(min(3, max_p + 1)):  # 限制搜索范围
                    for d in range(min(2, max_d + 1)):
                        for q in range(min(3, max_q + 1)):
                            for P in range(min(2, max_p + 1)):
                                for D in range(min(2, max_d + 1)):
                                    for Q in range(min(2, max_q + 1)):
                                        config = ((p, d, q), (P, D, Q, m))
                                        if config not in searched_configs:
                                            search_configs.append(config)
                                            searched_configs.add(config)
            
            # 3. 基于当前最优模型的邻近搜索
            if best_model_info:
                best_p, best_d, best_q = best_model_info['order']
                for p_offset in [-1, 0, 1]:
                    for q_offset in [-1, 0, 1]:
                        p = max(0, min(max_p, best_p + p_offset))
                        q = max(0, min(max_q, best_q + q_offset))
                        config = ((p, best_d, q), None)
                        if config not in searched_configs:
                            search_configs.append(config)
                            searched_configs.add(config)
            
            # 智能排序：优先尝试参数较少的模型
            search_configs.sort(key=lambda x: sum(x[0]) + (sum(x[1][:3]) if x[1] else 0))
            
            # 限制搜索数量以控制时间
            max_search = min(len(search_configs), 60)
            
            for i, (order, seasonal_order) in enumerate(search_configs[:max_search]):
                if time.time() - start_time > timeout * 0.85:  # 85%时间限制
                    break
                    
                result = fit_arima_model(order, seasonal_order, ts_data)
                if result:
                    results.append(result)
                    if result['aic'] < best_aic:
                        best_aic = result['aic']
                        best_model_info = result
        
        if best_model_info is None or len(results) == 0:
            # 如果没有找到合适的模型，尝试最简单的配置
            simple_result = fit_arima_model((1, 1, 1), None, ts_data)
            if simple_result:
                best_model_info = simple_result
                results = [simple_result]
            else:
                return {"error": "无法找到收敛的ARIMA模型，请检查数据质量"}
        
        best_model = best_model_info['model']
        best_order = best_model_info['order']
        best_seasonal_order = best_model_info['seasonal_order']
        
        # 安全地进行预测
        try:
            forecast_steps = min(12, len(data) // 6)  # 更保守的预测步数
            forecast_result = best_model.forecast(steps=forecast_steps)
            
            # 获取置信区间，如果失败则跳过
            try:
                forecast_ci = best_model.get_forecast(steps=forecast_steps).conf_int()
            except:
                forecast_ci = np.array([])  # 如果置信区间计算失败，返回空数组
                
        except Exception as e:
            forecast_steps = 1
            forecast_result = best_model.forecast(steps=1)
            forecast_ci = np.array([])
        
        fitted_values = best_model.fittedvalues
        residuals = best_model.resid
        
        # 创建增强的可视化
        try:
            fig = plt.figure(figsize=(16, 12))
            gs = fig.add_gridspec(3, 3, hspace=0.3, wspace=0.3)
            
            model_name = f'ARIMA{best_order}'
            if best_seasonal_order:
                model_name += f'×{best_seasonal_order}'
            
            # 1. 模型拟合和预测 (大图)
            ax1 = fig.add_subplot(gs[0, :])
            ax1.plot(ts_data, label='原始数据', linewidth=2, alpha=0.8, color='blue')
            ax1.plot(fitted_values, label='模型拟合', linewidth=2, alpha=0.8, color='red')
            
            forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
            ax1.plot(forecast_index, forecast_result, 
                    label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--', color='green')
            
            if len(forecast_ci) > 0:
                ax1.fill_between(forecast_index, 
                               forecast_ci[:, 0], forecast_ci[:, 1],
                               alpha=0.3, label='95%置信区间', color='green')
            
            ax1.set_title(f'智能自动选择的最优模型: {model_name}\n平稳性: {stationarity_msg}', fontsize=14)
            ax1.set_xlabel('时间')
            ax1.set_ylabel('值')
            ax1.legend()
            ax1.grid(True, alpha=0.3)
            
            # 2. 残差分析
            ax2 = fig.add_subplot(gs[1, 0])
            ax2.plot(residuals, linewidth=1.5, color='red', alpha=0.8)
            ax2.axhline(y=0, color='black', linestyle='--', alpha=0.5)
            ax2.set_title('残差序列')
            ax2.set_xlabel('时间')
            ax2.set_ylabel('残差')
            ax2.grid(True, alpha=0.3)
            
            # 3. 残差Q-Q图
            ax3 = fig.add_subplot(gs[1, 1])
            try:
                from scipy import stats
                stats.probplot(residuals, dist="norm", plot=ax3)
                ax3.set_title('残差Q-Q图')
                ax3.grid(True, alpha=0.3)
            except:
                ax3.hist(residuals, bins=20, alpha=0.7, color='red', edgecolor='black')
                ax3.set_title('残差分布')
                ax3.set_xlabel('残差值')
                ax3.set_ylabel('频数')
                ax3.grid(True, alpha=0.3)
            
            # 4. ACF/PACF图
            ax4 = fig.add_subplot(gs[1, 2])
            try:
                from statsmodels.tsa.stattools import acf, pacf
                
                # 计算ACF和PACF
                lags = min(20, len(residuals) // 4)
                acf_vals = acf(residuals, nlags=lags, alpha=0.05)
                
                # 绘制ACF
                ax4.stem(range(len(acf_vals[0])), acf_vals[0], basefmt=" ")
                ax4.fill_between(range(len(acf_vals[0])), acf_vals[1][:, 0], acf_vals[1][:, 1], 
                               alpha=0.3, color='blue')
                ax4.set_title('残差ACF')
                ax4.set_xlabel('滞后期')
                ax4.set_ylabel('自相关系数')
                ax4.grid(True, alpha=0.3)
            except:
                # 如果ACF计算失败，显示残差自相关
                ax4.plot(np.correlate(residuals, residuals, mode='full')[len(residuals)-10:len(residuals)+10])
                ax4.set_title('残差自相关')
                ax4.grid(True, alpha=0.3)
            
            # 5. 模型比较
            ax5 = fig.add_subplot(gs[2, 0])
            if len(results) > 1:
                top_models = sorted(results, key=lambda x: x['aic'])[:min(8, len(results))]
                model_names = []
                aics = []
                
                for model_info in top_models:
                    name = f"ARIMA{model_info['order']}"
                    if model_info['seasonal_order']:
                        name += f"×{model_info['seasonal_order'][:3]}"
                    model_names.append(name)
                    aics.append(model_info['aic'])
                
                bars = ax5.barh(range(len(model_names)), aics, alpha=0.7, 
                               color=['red' if i == 0 else 'lightblue' for i in range(len(aics))])
                ax5.set_yticks(range(len(model_names)))
                ax5.set_yticklabels(model_names, fontsize=9)
                ax5.set_xlabel('AIC值')
                ax5.set_title(f'模型比较 (前{len(model_names)}个)')
                ax5.grid(True, alpha=0.3)
                
                # 标注最优模型
                ax5.text(aics[0], 0, f' 最优: {aics[0]:.2f}', va='center', fontweight='bold')
            else:
                ax5.text(0.5, 0.5, '只找到一个收敛模型', 
                        ha='center', va='center', transform=ax5.transAxes)
                ax5.set_title('模型比较')
            
            # 6. 模型诊断信息
            ax6 = fig.add_subplot(gs[2, 1:])
            ax6.axis('off')
            
            # 计算增强的模型诊断统计
            ljung_box_p = None
            jarque_bera_p = None
            durbin_watson = None
            
            try:
                from statsmodels.stats.diagnostic import acorr_ljungbox
                lb_result = acorr_ljungbox(residuals, lags=min(10, len(residuals)//4), return_df=True)
                ljung_box_p = lb_result['lb_pvalue'].iloc[-1]
            except:
                pass
            
            try:
                from statsmodels.stats.stattools import jarque_bera
                jb_stat, jb_p, _, _ = jarque_bera(residuals)
                jarque_bera_p = jb_p
            except:
                pass
            
            try:
                from statsmodels.stats.stattools import durbin_watson as dw
                durbin_watson = dw(residuals)
            except:
                pass
            
            # 计算预测准确性指标
            mape = np.mean(np.abs((ts_data[1:] - fitted_values[1:]) / ts_data[1:])) * 100 if len(fitted_values) > 1 else 0
            rmse = np.sqrt(np.mean((ts_data - fitted_values) ** 2))
            mae = np.mean(np.abs(ts_data - fitted_values))
            
            result_text = f"""
🎯 智能自动ARIMA模型选择结果

📊 最优模型: {model_name}
   AIC: {best_model.aic:.4f}
   BIC: {best_model.bic:.4f}
   对数似然: {best_model.llf:.4f}
   拟合方法: {best_model_info.get('method', 'lbfgs')}

🔍 搜索统计:
   评估模型数量: {len(results)}
   搜索时间: {time.time() - start_time:.2f}秒
   搜索范围: p≤{max_p}, d≤{max_d}, q≤{max_q}
   建议差分阶数: {suggested_d}
   季节性: {'是' if seasonal else '否'}
   {f'检测周期: {m}' if seasonal else ''}

📈 预测准确性:
   RMSE: {rmse:.4f}
   MAE: {mae:.4f}
   MAPE: {mape:.2f}%

🧪 模型诊断:
   {f'Ljung-Box检验 p值: {ljung_box_p:.4f}' if ljung_box_p is not None else 'Ljung-Box检验: 未计算'}
   {f'Jarque-Bera检验 p值: {jarque_bera_p:.4f}' if jarque_bera_p is not None else 'Jarque-Bera检验: 未计算'}
   {f'Durbin-Watson统计量: {durbin_watson:.4f}' if durbin_watson is not None else 'Durbin-Watson: 未计算'}
   残差标准差: {np.std(residuals):.4f}
            """
            
            ax6.text(0.05, 0.95, result_text, fontsize=10, verticalalignment='top',
                    bbox=dict(boxstyle="round,pad=0.5", facecolor="lightblue", alpha=0.8),
                    transform=ax6.transAxes, family='monospace')
            
            plt.tight_layout()
            plot_base64 = create_plot_base64(fig)
            
        except Exception as e:
            plot_base64 = None
            print(f"可视化创建失败: {e}")
        
        # 计算最终的预测准确性指标
        mape = np.mean(np.abs((ts_data[1:] - fitted_values[1:]) / ts_data[1:])) * 100 if len(fitted_values) > 1 else 0
        rmse = np.sqrt(np.mean((ts_data - fitted_values) ** 2))
        mae = np.mean(np.abs(ts_data - fitted_values))
        
        return {
            'best_model': model_name,
            'best_order': best_order,
            'best_seasonal_order': best_seasonal_order,
            'best_aic': float(best_aic),
            'best_bic': float(best_model.bic),
            'best_llf': float(best_model.llf),
            'fitting_method': best_model_info.get('method', 'lbfgs'),
            'converged': True,  # 由于我们的增强检查，到这里的模型都是收敛的
            
            # 智能检测结果
            'stationarity_analysis': {
                'suggested_d': suggested_d,
                'stationarity_message': stationarity_msg,
                'detected_period': m if seasonal else None
            },
            
            # 搜索统计
            'search_statistics': {
                'search_time': round(time.time() - start_time, 2),
                'models_evaluated': len(results),
                'search_range': f'p≤{max_p}, d≤{max_d}, q≤{max_q}',
                'seasonal_enabled': seasonal
            },
            
            # 预测准确性指标
            'accuracy_metrics': {
                'rmse': float(rmse),
                'mae': float(mae),
                'mape': float(mape),
                'residual_std': float(np.std(residuals))
            },
            
            # 模型诊断
            'diagnostics': {
                'ljung_box_p': float(ljung_box_p) if ljung_box_p is not None else None,
                'jarque_bera_p': float(jarque_bera_p) if jarque_bera_p is not None else None,
                'durbin_watson': float(durbin_watson) if durbin_watson is not None else None
            },
            
            # 模型比较
            'model_comparison': sorted(results, key=lambda x: x['aic'])[:5],
            
            # 时间序列数据
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast_result.tolist(),
            'forecast_steps': forecast_steps,
            'forecast_ci_lower': forecast_ci[:, 0].tolist() if len(forecast_ci) > 0 else [],
            'forecast_ci_upper': forecast_ci[:, 1].tolist() if len(forecast_ci) > 0 else [],
            
            # 可视化
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"自动ARIMA模型选择错误: {str(e)}"}

@mcp.tool()
def seasonal_decomposition(data: List[float], model: str = 'additive', period: Optional[int] = None, 
                          extrapolate_trend: str = 'freq') -> Dict[str, Any]:
    """
    Decompose a time series into trend, seasonal and residual components.

    Args:
        data: Time series observations.
        model: Decomposition type ('additive' or 'multiplicative').
        period: Seasonal period; auto-detected from the series length when None.
        extrapolate_trend: Trend extrapolation method forwarded to statsmodels.

    Returns:
        Dict with the decomposed components (NaN edges removed), strength
        metrics and a base64-encoded plot, or a dict with an "error" key.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行季节性分解"}
        
        from statsmodels.tsa.seasonal import seasonal_decompose
        
        # Coerce to float array for numerical stability — consistent with the
        # other time-series tools in this module.
        ts_data = np.array(data, dtype=float)
        
        # Auto-detect a reasonable period when none is supplied.
        if period is None:
            if len(ts_data) >= 24:
                period = min(12, len(ts_data) // 2)
            else:
                period = max(2, len(ts_data) // 4)
        
        # seasonal_decompose requires at least two complete cycles; fail with a
        # clear message instead of an opaque statsmodels exception.
        if len(ts_data) < 2 * period:
            return {"error": f"数据点不足({len(ts_data)})，周期为{period}时至少需要{2 * period}个点"}
        
        # A multiplicative model is undefined for non-positive observations.
        if model == 'multiplicative' and np.any(ts_data <= 0):
            return {"error": "multiplicative模型要求所有数据为正值"}
        
        # Run the decomposition.
        decomposition = seasonal_decompose(
            ts_data, 
            model=model, 
            period=period,
            extrapolate_trend=extrapolate_trend
        )
        
        # Visualization: one panel per component.
        fig, axes = plt.subplots(4, 1, figsize=(15, 12))
        fig.suptitle(f'时间序列{model}分解 (周期={period})', fontsize=16)
        
        # Observed series
        axes[0].plot(decomposition.observed, label='原始数据', linewidth=2)
        axes[0].set_title('原始时间序列')
        axes[0].legend()
        axes[0].grid(True, alpha=0.3)
        
        # Trend component
        axes[1].plot(decomposition.trend, label='趋势分量', color='orange', linewidth=2)
        axes[1].set_title('趋势分量')
        axes[1].legend()
        axes[1].grid(True, alpha=0.3)
        
        # Seasonal component
        axes[2].plot(decomposition.seasonal, label='季节性分量', color='green', linewidth=2)
        axes[2].set_title('季节性分量')
        axes[2].legend()
        axes[2].grid(True, alpha=0.3)
        
        # Residual component
        axes[3].plot(decomposition.resid, label='残差分量', color='red', linewidth=2)
        axes[3].set_title('残差分量')
        axes[3].legend()
        axes[3].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Decomposition quality metrics; drop the NaN edges that
        # seasonal_decompose leaves on trend/residual.
        trend_data = decomposition.trend[~np.isnan(decomposition.trend)]
        seasonal_data = decomposition.seasonal[~np.isnan(decomposition.seasonal)]
        resid_data = decomposition.resid[~np.isnan(decomposition.resid)]
        
        # Strength of trend: 1 - Var(resid) / Var(trend + resid), floored at 0.
        if len(trend_data) > 0 and len(resid_data) > 0:
            trend_strength = max(0, 1 - np.var(resid_data) / np.var(trend_data + resid_data))
        else:
            trend_strength = 0
        
        # Strength of seasonality, analogous to trend strength.
        if len(seasonal_data) > 0 and len(resid_data) > 0:
            seasonal_strength = max(0, 1 - np.var(resid_data) / np.var(seasonal_data + resid_data))
        else:
            seasonal_strength = 0
        
        return {
            "decomposition_type": model,
            "period": period,
            "trend": trend_data.tolist(),
            "seasonal": seasonal_data.tolist(),
            "residual": resid_data.tolist(),
            "trend_strength": float(trend_strength),
            "seasonal_strength": float(seasonal_strength),
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"季节性分解错误: {str(e)}"}


@mcp.tool()
def time_series_classification(X_data: List[List[float]], y_data: List[int], 
                               method: str = 'intelligent_features', test_size: float = 0.3, 
                               random_state: int = 42, algorithm: str = 'auto_select',
                               auto_tune: bool = True, feature_selection: bool = True) -> Dict[str, Any]:
    """
    智能增强时间序列分类 - 完善版
    
    Args:
        X_data: 时间序列特征数据 (样本数 x 时间步长)
        y_data: 标签数据
        method: 特征提取方法 ('statistical', 'intelligent_features', 'dtw_features', 'raw', 'wavelet_features', 'ensemble_features')
        test_size: 测试集比例
        random_state: 随机种子
        algorithm: 分类算法 ('random_forest', 'xgboost', 'svm', 'ensemble', 'auto_select')
        auto_tune: 是否自动调参
        feature_selection: 是否进行特征选择
    
    Returns:
        分类结果和评估指标
    """
    try:
        from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
        from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
        from sklearn.svm import SVC
        from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score
        from sklearn.preprocessing import StandardScaler, LabelEncoder
        from sklearn.feature_selection import SelectKBest, f_classif, RFE
        from sklearn.decomposition import PCA
        from scipy import stats
        from scipy.fft import fft
        import seaborn as sns
    except ImportError:
        return {"error": "scikit-learn库未安装，无法进行分类分析"}
    
    # 智能特征提取辅助函数
    def extract_wavelet_features(series, wavelet='db4', levels=3):
        """Summary statistics of wavelet coefficients per decomposition level.

        Falls back to FFT magnitude statistics when pywt is unavailable.
        """
        def _summary(vals):
            # Seven descriptive statistics for one coefficient array.
            return [np.mean(vals), np.std(vals), np.max(vals), np.min(vals),
                    np.median(vals), stats.skew(vals), stats.kurtosis(vals)]

        try:
            import pywt
        except ImportError:
            # pywt missing: summarize the positive half of the FFT magnitude
            # spectrum as a substitute feature set.
            spectrum = np.abs(fft(series))
            spectrum = spectrum[:len(spectrum) // 2]
            if len(spectrum) > 0:
                return _summary(spectrum)
            return [0] * 7

        feature_list = []
        for band in pywt.wavedec(series, wavelet, level=levels):
            if len(band) > 0:
                feature_list += _summary(band)
        return feature_list
    
    def extract_advanced_statistical_features(series):
        """Extract an extended statistical feature vector (26 features).

        Covers basic moments/order statistics, distribution shape, first- and
        second-difference dynamics, and short-lag autocorrelation. Expects a
        numpy array (boolean indexing is used below).
        """
        features = []
        
        # Basic moments and order statistics.
        features.extend([
            np.mean(series), np.std(series), np.var(series),
            np.max(series), np.min(series), np.median(series),
            np.percentile(series, 25), np.percentile(series, 75),
            stats.skew(series), stats.kurtosis(series)
        ])
        
        # Distribution / spread features.
        features.extend([
            np.max(series) - np.min(series),  # range
            np.mean(np.abs(series - np.mean(series))),  # mean absolute deviation
            len(series[series > np.mean(series)]) / len(series),  # share above mean
            np.sum(series > np.percentile(series, 75)) / len(series),  # share above Q3
            np.sum(series < np.percentile(series, 25)) / len(series),  # share below Q1
        ])
        
        # First-difference (change/trend) features.
        diff_series = np.diff(series)
        if len(diff_series) > 0:
            features.extend([
                np.mean(diff_series), np.std(diff_series),
                np.sum(diff_series > 0) / len(diff_series),  # rising share
                np.sum(np.abs(diff_series) > np.std(diff_series)) / len(diff_series),  # abnormal-change share
                np.max(diff_series), np.min(diff_series)
            ])
        else:
            features.extend([0] * 6)
        
        # Second-difference features.
        if len(series) > 2:
            diff2_series = np.diff(series, 2)
            features.extend([
                np.mean(diff2_series), np.std(diff2_series),
                np.sum(np.abs(diff2_series)) / len(series),  # mean absolute 2nd difference
            ])
        else:
            features.extend([0] * 3)
        
        # Lag-1 / lag-5 autocorrelation. Fix: np.corrcoef returns NaN (it does
        # not raise) for constant/degenerate slices, so NaNs previously leaked
        # into the feature matrix; map them to 0 explicitly.
        try:
            with np.errstate(invalid='ignore', divide='ignore'):
                autocorr_1 = np.corrcoef(series[:-1], series[1:])[0, 1] if len(series) > 1 else 0
                autocorr_5 = np.corrcoef(series[:-5], series[5:])[0, 1] if len(series) > 5 else 0
            features.extend([
                0.0 if np.isnan(autocorr_1) else float(autocorr_1),
                0.0 if np.isnan(autocorr_5) else float(autocorr_5),
            ])
        except Exception:
            features.extend([0, 0])
        
        return features
    
    def extract_frequency_features(series):
        """Extract 9 frequency-domain features from the positive FFT spectrum.

        Features: peak/mean/std magnitude, dominant frequency index, low/mid/
        high band energy shares, spectral centroid and spectral bandwidth.
        """
        try:
            fft_vals = np.abs(fft(series))
            fft_vals = fft_vals[:len(fft_vals)//2]  # positive frequencies only
            
            # Fix: an all-zero series has zero spectral energy, and the ratio
            # features below would be NaN (division by zero does not raise, so
            # the except fallback never fired). Guard explicitly instead.
            total_energy = np.sum(fft_vals)
            if len(fft_vals) == 0 or total_energy == 0:
                return [0] * 9
            
            features = [
                np.max(fft_vals), np.mean(fft_vals), np.std(fft_vals),
                np.argmax(fft_vals),  # dominant frequency index
                np.sum(fft_vals[:len(fft_vals)//4]) / total_energy,  # low-band energy share
                np.sum(fft_vals[len(fft_vals)//4:len(fft_vals)//2]) / total_energy,  # mid-band energy share
                np.sum(fft_vals[len(fft_vals)//2:]) / total_energy,  # high-band energy share
            ]
            
            # Spectral centroid and bandwidth over frequency-bin indices.
            freqs = np.arange(len(fft_vals))
            spectral_centroid = np.sum(freqs * fft_vals) / total_energy
            spectral_bandwidth = np.sqrt(np.sum(((freqs - spectral_centroid) ** 2) * fft_vals) / total_energy)
            features.extend([spectral_centroid, spectral_bandwidth])
            
            return features
        except Exception:
            return [0] * 9
    
    def assess_data_characteristics(X, y):
        """Summarize dataset properties used for automatic algorithm selection.

        Expects X as a 2-D array (samples x timepoints) and y as encoded
        integer labels; returns size, class-balance and complexity metrics.
        """
        n_samples, n_timepoints = X.shape
        n_classes = len(np.unique(y))
        
        # Class balance: smallest class size relative to the largest.
        class_counts = np.bincount(y)
        class_balance = np.min(class_counts) / np.max(class_counts)
        
        # Roughness of up to 50 sampled series: per-series ratio of step
        # variability to overall variability, averaged.
        sample_count = min(50, len(X))
        complexity_score = sum(
            np.std(np.diff(row)) / (np.std(row) + 1e-8)
            for row in X[:sample_count]
        ) / sample_count
        
        return {
            'n_samples': n_samples,
            'n_timepoints': n_timepoints,
            'n_classes': n_classes,
            'class_balance': class_balance,
            'complexity_score': complexity_score,
            'avg_length': n_timepoints
        }
    
    try:
        
        # 转换数据格式
        X = np.array(X_data)
        y = np.array(y_data)
        
        # 标签编码
        label_encoder = LabelEncoder()
        y_encoded = label_encoder.fit_transform(y)
        class_names = label_encoder.classes_
        
        # 评估数据特征
        data_chars = assess_data_characteristics(X, y_encoded)
        
        # 智能特征提取
        if method == 'intelligent_features':
            # 综合智能特征提取（统计+频域+小波+形状特征）
            features = []
            for series in X:
                feature_vector = []
                
                # 高级统计特征
                feature_vector.extend(extract_advanced_statistical_features(series))
                
                # 频域特征
                feature_vector.extend(extract_frequency_features(series))
                
                # 小波特征
                feature_vector.extend(extract_wavelet_features(series))
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'wavelet_features':
            # 专门的小波特征提取
            features = []
            for series in X:
                feature_vector = extract_wavelet_features(series, levels=4)
                # 添加基础统计特征
                feature_vector.extend([
                    np.mean(series), np.std(series), np.max(series), np.min(series),
                    stats.skew(series), stats.kurtosis(series)
                ])
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'ensemble_features':
            # 集成多种特征提取方法
            features = []
            for series in X:
                feature_vector = []
                
                # 统计特征（权重较高）
                stat_features = extract_advanced_statistical_features(series)
                feature_vector.extend(stat_features)
                
                # 频域特征
                freq_features = extract_frequency_features(series)
                feature_vector.extend(freq_features)
                
                # 小波特征（降维）
                wavelet_features = extract_wavelet_features(series, levels=2)
                feature_vector.extend(wavelet_features[:10])  # 只取前10个小波特征
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'enhanced_features':
            # 综合特征提取（统计+频域+形状+趋势特征）
            features = []
            for series in X:
                feature_vector = []
                
                # 基础统计特征
                feature_vector.extend([
                    np.mean(series), np.std(series), np.var(series),
                    np.max(series), np.min(series), np.median(series),
                    np.percentile(series, 25), np.percentile(series, 75),
                    stats.skew(series), stats.kurtosis(series)
                ])
                
                # 趋势和变化特征
                diff_series = np.diff(series)
                feature_vector.extend([
                    np.mean(diff_series), np.std(diff_series),
                    np.sum(diff_series > 0) / len(diff_series),  # 上升比例
                    np.sum(np.abs(diff_series) > np.std(diff_series)) / len(diff_series),  # 异常变化比例
                    np.max(diff_series), np.min(diff_series)
                ])
                
                # 频域特征
                try:
                    fft_vals = np.abs(fft(series))
                    fft_vals = fft_vals[:len(fft_vals)//2]  # 只取正频率部分
                    if len(fft_vals) > 0:
                        feature_vector.extend([
                            np.max(fft_vals), np.mean(fft_vals), np.std(fft_vals),
                            np.argmax(fft_vals),  # 主频率
                            np.sum(fft_vals[:len(fft_vals)//4]) / np.sum(fft_vals),  # 低频能量比例
                        ])
                    else:
                        feature_vector.extend([0, 0, 0, 0, 0])
                except:
                    feature_vector.extend([0, 0, 0, 0, 0])
                
                # 形状和分布特征
                feature_vector.extend([
                    len(series),
                    np.max(series) - np.min(series),  # 范围
                    np.mean(np.abs(series - np.mean(series))),  # 平均绝对偏差
                    len(series[series > np.mean(series)]) / len(series),  # 高于均值的比例
                    np.sum(np.abs(np.diff(series, 2))) / len(series),  # 二阶差分的平均绝对值
                ])
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'statistical':
            # 基础统计特征
            features = []
            for series in X:
                feature_vector = [
                    np.mean(series), np.std(series), np.min(series), np.max(series),
                    np.median(series), np.percentile(series, 25), np.percentile(series, 75),
                    len(series[series > np.mean(series)]) / len(series),
                    np.sum(np.diff(series) > 0) / (len(series) - 1),
                ]
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'dtw_features':
            # 改进的DTW距离特征
            def enhanced_dtw_distance(s1, s2):
                from scipy.spatial.distance import euclidean
                min_len = min(len(s1), len(s2))
                # 标准化序列
                s1_norm = (s1[:min_len] - np.mean(s1[:min_len])) / (np.std(s1[:min_len]) + 1e-8)
                s2_norm = (s2[:min_len] - np.mean(s2[:min_len])) / (np.std(s2[:min_len]) + 1e-8)
                return euclidean(s1_norm, s2_norm)
            
            # 选择代表性序列（每个类别选择中心序列）
            representative_indices = []
            for class_label in np.unique(y_encoded):
                class_indices = np.where(y_encoded == class_label)[0]
                if len(class_indices) > 0:
                    class_data = X[class_indices]
                    center_idx = class_indices[np.argmin([np.std(series) for series in class_data])]
                    representative_indices.append(center_idx)
            
            features = []
            for series in X:
                feature_vector = []
                for rep_idx in representative_indices:
                    dist = enhanced_dtw_distance(series, X[rep_idx])
                    feature_vector.append(dist)
                # 添加统计特征
                feature_vector.extend([
                    np.mean(series), np.std(series), np.max(series) - np.min(series),
                    stats.skew(series), stats.kurtosis(series)
                ])
                features.append(feature_vector)
            X_features = np.array(features)
            
        else:  # raw
            # 原始时间序列（填充到相同长度）
            max_len = max(len(series) for series in X)
            X_features = np.array([np.pad(series, (0, max_len - len(series)), 'constant') for series in X])
        
        # 特征选择（如果启用）
        if feature_selection and X_features.shape[1] > 20:
            # 使用SelectKBest进行特征选择
            k_features = min(max(10, X_features.shape[1] // 3), 50)
            selector = SelectKBest(score_func=f_classif, k=k_features)
            X_features = selector.fit_transform(X_features, y_encoded)
            selected_features = selector.get_support(indices=True)
        else:
            selected_features = None
        
        # 数据标准化
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X_features)
        
        # 划分训练集和测试集
        X_train, X_test, y_train, y_test = train_test_split(
            X_scaled, y_encoded, test_size=test_size, random_state=random_state, stratify=y_encoded
        )
        
        # 智能模型选择和训练
        if algorithm == 'auto_select':
            # 基于数据特征自动选择最佳算法
            algorithms_to_try = []
            
            # 根据数据特征选择合适的算法
            if data_chars['n_samples'] < 100:
                algorithms_to_try = ['svm', 'random_forest']
            elif data_chars['n_samples'] < 1000:
                algorithms_to_try = ['random_forest', 'xgboost', 'svm']
            else:
                algorithms_to_try = ['xgboost', 'random_forest', 'ensemble']
            
            # 如果类别不平衡，优先使用集成方法
            if data_chars['class_balance'] < 0.5:
                algorithms_to_try.insert(0, 'ensemble')
            
            best_score = 0
            best_algorithm = 'random_forest'
            best_classifier = None
            
            for alg in algorithms_to_try:
                try:
                    if alg == 'random_forest':
                        clf = RandomForestClassifier(
                            n_estimators=100, max_depth=10, min_samples_split=5,
                            random_state=random_state, n_jobs=-1
                        )
                    elif alg == 'xgboost':
                        try:
                            from xgboost import XGBClassifier
                            clf = XGBClassifier(
                                n_estimators=100, max_depth=6, learning_rate=0.1,
                                random_state=random_state, n_jobs=-1
                            )
                        except ImportError:
                            clf = GradientBoostingClassifier(
                                n_estimators=100, max_depth=6, learning_rate=0.1,
                                random_state=random_state
                            )
                    elif alg == 'svm':
                        clf = SVC(
                            kernel='rbf', C=1.0, gamma='scale', probability=True,
                            random_state=random_state
                        )
                    elif alg == 'ensemble':
                        rf = RandomForestClassifier(n_estimators=50, random_state=random_state)
                        gb = GradientBoostingClassifier(n_estimators=50, random_state=random_state)
                        svm = SVC(probability=True, random_state=random_state)
                        clf = VotingClassifier(
                            estimators=[('rf', rf), ('gb', gb), ('svm', svm)],
                            voting='soft'
                        )
                    
                    # 快速交叉验证评估
                    cv_score = np.mean(cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy'))
                    
                    if cv_score > best_score:
                        best_score = cv_score
                        best_algorithm = alg
                        best_classifier = clf
                        
                except Exception as e:
                    continue
            
            classifier = best_classifier
            selected_algorithm = best_algorithm
            
        elif algorithm == 'random_forest':
            classifier = RandomForestClassifier(
                n_estimators=200, max_depth=15, min_samples_split=5,
                min_samples_leaf=2, random_state=random_state, n_jobs=-1
            )
        elif algorithm == 'xgboost':
            try:
                from xgboost import XGBClassifier
                classifier = XGBClassifier(
                    n_estimators=100, max_depth=6, learning_rate=0.1,
                    random_state=random_state, n_jobs=-1
                )
            except ImportError:
                classifier = GradientBoostingClassifier(
                    n_estimators=100, max_depth=6, learning_rate=0.1,
                    random_state=random_state
                )
        elif algorithm == 'svm':
            classifier = SVC(
                kernel='rbf', C=1.0, gamma='scale', probability=True,
                random_state=random_state
            )
        elif algorithm == 'ensemble':
            # 集成学习
            rf = RandomForestClassifier(n_estimators=100, random_state=random_state)
            gb = GradientBoostingClassifier(n_estimators=100, random_state=random_state)
            svm = SVC(probability=True, random_state=random_state)
            classifier = VotingClassifier(
                estimators=[('rf', rf), ('gb', gb), ('svm', svm)],
                voting='soft'
            )
        else:
            classifier = RandomForestClassifier(n_estimators=100, random_state=random_state)
            selected_algorithm = algorithm
        
        # 自动调参（如果启用）
        if auto_tune and algorithm != 'auto_select':
            param_grids = {
                'random_forest': {
                    'n_estimators': [100, 200],
                    'max_depth': [10, 15, None],
                    'min_samples_split': [2, 5]
                },
                'svm': {
                    'C': [0.1, 1, 10],
                    'gamma': ['scale', 'auto']
                },
                'xgboost': {
                    'n_estimators': [100, 200],
                    'max_depth': [3, 6, 9],
                    'learning_rate': [0.1, 0.2]
                }
            }
            
            if algorithm in param_grids:
                try:
                    grid_search = GridSearchCV(
                        classifier, param_grids[algorithm], 
                        cv=3, scoring='accuracy', n_jobs=-1
                    )
                    grid_search.fit(X_train, y_train)
                    classifier = grid_search.best_estimator_
                    best_params = grid_search.best_params_
                except:
                    best_params = None
            else:
                best_params = None
        else:
            best_params = None
        
        # 训练模型
        classifier.fit(X_train, y_train)
        
        # 预测
        y_pred = classifier.predict(X_test)
        y_pred_proba = classifier.predict_proba(X_test)
        
        # 交叉验证评估
        cv_scores = cross_val_score(classifier, X_scaled, y_encoded, cv=5, scoring='accuracy')
        
        # 评估指标
        accuracy = accuracy_score(y_test, y_pred)
        classification_rep = classification_report(y_test, y_pred, 
                                                 target_names=[str(name) for name in class_names],
                                                 output_dict=True, zero_division=0)
        conf_matrix = confusion_matrix(y_test, y_pred)
        
        # ROC-AUC（多分类）
        try:
            if len(class_names) == 2:
                roc_auc = roc_auc_score(y_test, y_pred_proba[:, 1])
            else:
                roc_auc = roc_auc_score(y_test, y_pred_proba, multi_class='ovr', average='weighted')
        except:
            roc_auc = None
        
        # 创建增强的可视化
        fig, axes = plt.subplots(3, 3, figsize=(20, 15))
        algorithm_display = selected_algorithm if algorithm == 'auto_select' else algorithm
        fig.suptitle(f'智能时间序列分类结果 - 完善版\n方法: {method}, 算法: {algorithm_display}, 数据质量: {data_chars["class_balance"]:.2f}', fontsize=16)
        
        # 混淆矩阵
        sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', 
                   xticklabels=class_names, yticklabels=class_names, ax=axes[0, 0])
        axes[0, 0].set_title('混淆矩阵')
        axes[0, 0].set_xlabel('预测标签')
        axes[0, 0].set_ylabel('真实标签')
        
        # 特征重要性
        if hasattr(classifier, 'feature_importances_'):
            feature_importance = classifier.feature_importances_
            top_indices = np.argsort(feature_importance)[-15:]
            axes[0, 1].barh(range(len(top_indices)), feature_importance[top_indices], color='skyblue')
            axes[0, 1].set_title('Top 15 特征重要性')
            axes[0, 1].set_xlabel('重要性')
            axes[0, 1].set_yticks(range(len(top_indices)))
            axes[0, 1].set_yticklabels([f'F{i}' for i in top_indices])
        else:
            axes[0, 1].text(0.5, 0.5, '该算法不支持\n特征重要性分析', 
                           ha='center', va='center', transform=axes[0, 1].transAxes)
            axes[0, 1].set_title('特征重要性')
        
        # 性能指标
        metrics = ['准确率', 'CV均值', 'CV标准差']
        values = [accuracy, np.mean(cv_scores), np.std(cv_scores)]
        if roc_auc is not None:
            metrics.append('ROC-AUC')
            values.append(roc_auc)
        
        bars = axes[0, 2].bar(metrics, values, color=['#3498db', '#e74c3c', '#f39c12', '#2ecc71'][:len(values)])
        axes[0, 2].set_title('性能指标')
        axes[0, 2].set_ylabel('分数')
        axes[0, 2].set_ylim(0, 1)
        axes[0, 2].tick_params(axis='x', rotation=45)
        
        for bar, value in zip(bars, values):
            axes[0, 2].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01, 
                           f'{value:.3f}', ha='center', va='bottom')
        
        # 类别分布
        unique, counts = np.unique(y_encoded, return_counts=True)
        class_labels = [class_names[i] for i in unique]
        axes[1, 0].pie(counts, labels=class_labels, autopct='%1.1f%%', startangle=90)
        axes[1, 0].set_title('类别分布')
        
        # 预测概率分布
        for i, class_name in enumerate(class_names):
            axes[1, 1].hist(y_pred_proba[:, i], alpha=0.6, label=f'类别 {class_name}', bins=20)
        axes[1, 1].set_title('预测概率分布')
        axes[1, 1].set_xlabel('预测概率')
        axes[1, 1].set_ylabel('频次')
        axes[1, 1].legend()
        
        # 交叉验证分数
        axes[1, 2].plot(range(1, len(cv_scores) + 1), cv_scores, 'bo-', linewidth=2, markersize=8)
        axes[1, 2].axhline(y=np.mean(cv_scores), color='red', linestyle='--', 
                          label=f'均值: {np.mean(cv_scores):.3f}')
        axes[1, 2].fill_between(range(1, len(cv_scores) + 1), 
                               np.mean(cv_scores) - np.std(cv_scores),
                               np.mean(cv_scores) + np.std(cv_scores),
                               alpha=0.2, color='red')
        axes[1, 2].set_title('交叉验证分数')
        axes[1, 2].set_xlabel('折数')
        axes[1, 2].set_ylabel('准确率')
        axes[1, 2].legend()
        axes[1, 2].grid(True, alpha=0.3)
        
        # 数据特征分析
        data_metrics = ['样本数', '时间点数', '类别数', '类别平衡性', '复杂度']
        data_values = [
            data_chars['n_samples'], data_chars['n_timepoints'], 
            data_chars['n_classes'], data_chars['class_balance'], 
            data_chars['complexity_score']
        ]
        # 标准化显示
        normalized_values = [v/max(data_values) for v in data_values]
        bars = axes[2, 0].bar(data_metrics, normalized_values, color=['#3498db', '#e74c3c', '#f39c12', '#2ecc71', '#9b59b6'])
        axes[2, 0].set_title('数据特征分析（标准化）')
        axes[2, 0].set_ylabel('标准化值')
        axes[2, 0].tick_params(axis='x', rotation=45)
        for bar, value in zip(bars, data_values):
            axes[2, 0].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01, 
                           f'{value:.2f}', ha='center', va='bottom', fontsize=8)
        
        # 特征重要性分布（如果有特征选择）
        if selected_features is not None:
            axes[2, 1].hist(selected_features, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
            axes[2, 1].set_title(f'选择的特征分布 (共{len(selected_features)}个)')
            axes[2, 1].set_xlabel('特征索引')
            axes[2, 1].set_ylabel('频次')
            axes[2, 1].grid(True, alpha=0.3)
        else:
            axes[2, 1].text(0.5, 0.5, f'未进行特征选择\n总特征数: {X_features.shape[1]}', 
                           ha='center', va='center', transform=axes[2, 1].transAxes)
            axes[2, 1].set_title('特征选择信息')
        
        # 智能分析报告
        analysis_text = f"""智能分析报告:
• 数据规模: {data_chars['n_samples']}样本 × {data_chars['n_timepoints']}时间点
• 分类任务: {data_chars['n_classes']}类别分类
• 类别平衡性: {data_chars['class_balance']:.3f}
• 数据复杂度: {data_chars['complexity_score']:.3f}
• 选择算法: {algorithm_display}
• 特征提取: {method}
• 模型准确率: {accuracy:.3f}
• 交叉验证: {np.mean(cv_scores):.3f}±{np.std(cv_scores):.3f}"""
        
        if best_params:
            analysis_text += f"\n• 最优参数: {best_params}"
        if roc_auc:
            analysis_text += f"\n• ROC-AUC: {roc_auc:.3f}"
        
        axes[2, 2].text(0.05, 0.95, analysis_text, transform=axes[2, 2].transAxes, 
                        fontsize=10, verticalalignment='top', 
                        bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.8))
        axes[2, 2].set_title('智能分析报告')
        axes[2, 2].axis('off')
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        return {
            "method": method,
            "algorithm": algorithm,
            "selected_algorithm": selected_algorithm,
            "accuracy": float(accuracy),
            "cv_mean": float(np.mean(cv_scores)),
            "cv_std": float(np.std(cv_scores)),
            "roc_auc": float(roc_auc) if roc_auc is not None else None,
            "classification_report": classification_rep,
            "confusion_matrix": conf_matrix.tolist(),
            "predictions": y_pred.tolist(),
            "prediction_probabilities": y_pred_proba.tolist(),
            "feature_importance": classifier.feature_importances_.tolist() if hasattr(classifier, 'feature_importances_') else None,
            "class_names": class_names.tolist(),
            "cv_scores": cv_scores.tolist(),
            "best_params": best_params,
            "selected_features": selected_features.tolist() if selected_features is not None else None,
            "data_characteristics": data_chars,
            "intelligent_analysis": {
                "auto_selected_algorithm": selected_algorithm if algorithm == 'auto_select' else None,
                "feature_selection_applied": selected_features is not None,
                "n_selected_features": len(selected_features) if selected_features is not None else X_features.shape[1],
                "auto_tuning_applied": best_params is not None,
                "data_quality_score": data_chars['class_balance'],
                "complexity_assessment": "高" if data_chars['complexity_score'] > 1.0 else "中" if data_chars['complexity_score'] > 0.5 else "低",
                "recommended_improvements": [
                    "考虑增加更多训练样本" if data_chars['n_samples'] < 200 else None,
                    "建议处理类别不平衡" if data_chars['class_balance'] < 0.5 else None,
                    "可尝试更复杂的特征工程" if data_chars['complexity_score'] < 0.3 else None
                ]
            },
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"时间序列分类错误: {str(e)}"}


@mcp.tool()
def time_series_clustering(data: List[List[float]], n_clusters: int = 3, 
                          method: str = 'kmeans', distance_metric: str = 'euclidean',
                          feature_extraction: str = 'raw', auto_select_k: bool = False,
                          max_k: int = 10) -> Dict[str, Any]:
    """
    Enhanced time-series clustering analysis.

    Args:
        data: List of time series (each an equal-length list of floats)
        n_clusters: Number of clusters (DBSCAN infers its own count)
        method: Clustering method ('kmeans', 'hierarchical', 'dbscan', 'gaussian_mixture')
        distance_metric: Distance measure ('euclidean', 'correlation', 'dtw', 'cosine')
        feature_extraction: Feature extraction method ('raw', 'statistical', 'fourier', 'wavelet')
        auto_select_k: Whether to pick the optimal cluster count by silhouette score
        max_k: Maximum k to try during automatic selection

    Returns:
        Dict with labels, cluster centers/statistics, quality metrics and a
        base64-encoded visualization, or an {"error": ...} dict on failure.
    """
    try:
        from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
        from sklearn.mixture import GaussianMixture
        from sklearn.preprocessing import StandardScaler
        from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
        from sklearn.decomposition import PCA
        from scipy.spatial.distance import pdist, squareform
        from scipy.stats import skew, kurtosis
    except ImportError:
        return {"error": "scikit-learn库未安装，无法进行聚类分析"}
    
    try:
        # Convert input into an (n_samples, n_timepoints) matrix
        X = np.array(data)
        n_samples, n_timepoints = X.shape
        
        # ---------- Feature extraction ----------
        if feature_extraction == 'statistical':
            # Summary statistics per series
            features = []
            for series in X:
                feature_vector = [
                    np.mean(series), np.std(series), np.min(series), np.max(series),
                    np.median(series), skew(series), kurtosis(series),
                    np.percentile(series, 25), np.percentile(series, 75),
                    np.sum(np.diff(series) > 0) / len(series)  # fraction of upward moves
                ]
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif feature_extraction == 'fourier':
            # Magnitudes of the first half of the FFT spectrum, normalized to sum to 1
            features = []
            for series in X:
                fft_vals = np.fft.fft(series)
                fft_features = np.abs(fft_vals[:len(fft_vals)//2])
                fft_features = fft_features / np.sum(fft_features)
                features.append(fft_features)
            X_features = np.array(features)
            
        elif feature_extraction == 'wavelet':
            # Simplified wavelet-style features: mean/std at several downsampling scales
            features = []
            for series in X:
                wavelet_features = []
                for scale in [2, 4, 8]:
                    if scale < len(series):
                        downsampled = series[::scale]
                        wavelet_features.extend([np.mean(downsampled), np.std(downsampled)])
                    else:
                        wavelet_features.extend([np.mean(series), np.std(series)])
                features.append(wavelet_features)
            X_features = np.array(features)
            
        else:  # raw
            X_features = X
        
        # ---------- Automatic selection of the cluster count ----------
        if auto_select_k and method in ['kmeans', 'gaussian_mixture']:
            silhouette_scores = []
            k_range = range(2, min(max_k + 1, n_samples))
            # Fix: scale once (not once per k) and compute the silhouette on the
            # SAME matrix the model was fitted on — the original clustered the
            # scaled data but scored the unscaled features, biasing k selection.
            X_scaled_for_k = StandardScaler().fit_transform(X_features)
            
            for k in k_range:
                if method == 'kmeans':
                    temp_clustering = KMeans(n_clusters=k, random_state=42, n_init=10)
                else:
                    temp_clustering = GaussianMixture(n_components=k, random_state=42)
                
                temp_labels = temp_clustering.fit_predict(X_scaled_for_k)
                if len(np.unique(temp_labels)) > 1:
                    silhouette_scores.append(silhouette_score(X_scaled_for_k, temp_labels))
                else:
                    # Degenerate single-cluster result: mark as worst possible score
                    silhouette_scores.append(-1)
            
            if silhouette_scores:
                n_clusters = int(k_range[np.argmax(silhouette_scores)])
        
        # ---------- Distance computation and clustering ----------
        if distance_metric == 'correlation':
            # Correlation distance: 1 - |corr| between series
            correlation_matrix = np.corrcoef(X_features)
            distance_matrix = 1 - np.abs(correlation_matrix)
            
            clustering = AgglomerativeClustering(
                n_clusters=n_clusters, 
                metric='precomputed', 
                linkage='average'
            )
            labels = clustering.fit_predict(distance_matrix)
            X_for_metrics = X_features
            
        elif distance_metric == 'cosine':
            # Cosine distance via pairwise matrix
            from sklearn.metrics.pairwise import cosine_distances
            distance_matrix = cosine_distances(X_features)
            
            clustering = AgglomerativeClustering(
                n_clusters=n_clusters,
                metric='precomputed',
                linkage='average'
            )
            labels = clustering.fit_predict(distance_matrix)
            X_for_metrics = X_features
            
        else:  # euclidean or anything else
            # Standardize features so no single dimension dominates the distance
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X_features)
            
            if method == 'kmeans':
                clustering = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
                labels = clustering.fit_predict(X_scaled)
            elif method == 'hierarchical':
                clustering = AgglomerativeClustering(n_clusters=n_clusters)
                labels = clustering.fit_predict(X_scaled)
            elif method == 'dbscan':
                clustering = DBSCAN(eps=0.5, min_samples=5)
                labels = clustering.fit_predict(X_scaled)
                # DBSCAN decides the cluster count itself; -1 marks noise
                n_clusters = len(np.unique(labels[labels != -1]))
            elif method == 'gaussian_mixture':
                clustering = GaussianMixture(n_components=n_clusters, random_state=42)
                labels = clustering.fit_predict(X_scaled)
            else:
                return {"error": f"不支持的聚类方法: {method}"}
            
            X_for_metrics = X_scaled
        
        # ---------- Clustering quality metrics ----------
        try:
            unique_labels = np.unique(labels)
            # Metrics need >= 2 clusters and no noise points (-1)
            if len(unique_labels) > 1 and -1 not in unique_labels:
                silhouette_avg = silhouette_score(X_for_metrics, labels)
                calinski_harabasz = calinski_harabasz_score(X_for_metrics, labels)
                davies_bouldin = davies_bouldin_score(X_for_metrics, labels)
            else:
                silhouette_avg = calinski_harabasz = davies_bouldin = None
        except Exception:
            # Fix: narrowed the original bare `except:` so e.g. KeyboardInterrupt
            # is no longer swallowed here
            silhouette_avg = calinski_harabasz = davies_bouldin = None
        
        # ---------- Cluster centers (in the original time-series space) ----------
        cluster_centers = []
        cluster_info = []
        
        for i in range(n_clusters):
            cluster_mask = (labels == i)
            cluster_data = X[cluster_mask]
            
            if len(cluster_data) > 0:
                center = np.mean(cluster_data, axis=0)
                cluster_centers.append(center)
                
                # Intra-cluster cohesion statistics
                intra_distances = pdist(cluster_data)
                cluster_info.append({
                    'size': int(np.sum(cluster_mask)),
                    'mean_intra_distance': float(np.mean(intra_distances)) if len(intra_distances) > 0 else 0,
                    'std_intra_distance': float(np.std(intra_distances)) if len(intra_distances) > 0 else 0
                })
            else:
                cluster_centers.append(np.zeros(n_timepoints))
                cluster_info.append({'size': 0, 'mean_intra_distance': 0, 'std_intra_distance': 0})
        
        # DBSCAN noise points; int() keeps the return value JSON-serializable
        noise_count = int(np.sum(labels == -1)) if method == 'dbscan' else 0
        
        # ---------- Visualization ----------
        fig, axes = plt.subplots(3, 2, figsize=(18, 15))
        fig.suptitle(f'增强时间序列聚类分析\n方法: {method}, 距离: {distance_metric}, 特征: {feature_extraction}', fontsize=16)
        
        # Clustered series with their centers overlaid
        colors = plt.cm.Set3(np.linspace(0, 1, max(n_clusters, 3)))
        for i in range(n_clusters):
            cluster_data = X[labels == i]
            for series in cluster_data:
                axes[0, 0].plot(series, color=colors[i], alpha=0.3, linewidth=1)
            # Cluster center drawn on top
            if len(cluster_data) > 0:
                axes[0, 0].plot(cluster_centers[i], color=colors[i], linewidth=3, 
                               label=f'聚类 {i} (n={cluster_info[i]["size"]})')
        
        # Noise points (DBSCAN only)
        if noise_count > 0:
            noise_data = X[labels == -1]
            for series in noise_data:
                axes[0, 0].plot(series, color='black', alpha=0.3, linewidth=1)
            axes[0, 0].plot([], [], color='black', alpha=0.3, label=f'噪声点 (n={noise_count})')
        
        axes[0, 0].set_title('聚类结果')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Cluster center comparison
        for i, center in enumerate(cluster_centers):
            axes[0, 1].plot(center, color=colors[i], linewidth=2, 
                           marker='o', label=f'聚类 {i}')
        axes[0, 1].set_title('聚类中心对比')
        axes[0, 1].legend()
        axes[0, 1].grid(True, alpha=0.3)
        
        # Cluster size distribution
        unique, counts = np.unique(labels[labels != -1], return_counts=True)
        bars = axes[1, 0].bar(unique, counts, color=colors[:len(unique)])
        if noise_count > 0:
            axes[1, 0].bar([-1], [noise_count], color='black', alpha=0.7, label='噪声点')
        axes[1, 0].set_title('聚类分布')
        axes[1, 0].set_xlabel('聚类标签')
        axes[1, 0].set_ylabel('样本数量')
        
        # 2-D PCA projection (only meaningful when feature dimension > 2)
        if X_for_metrics.shape[1] > 2:
            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X_for_metrics)
            
            for i in range(n_clusters):
                cluster_mask = (labels == i)
                if np.sum(cluster_mask) > 0:
                    axes[1, 1].scatter(X_pca[cluster_mask, 0], X_pca[cluster_mask, 1], 
                                     c=colors[i], label=f'聚类 {i}', alpha=0.7)
            
            if noise_count > 0:
                noise_mask = (labels == -1)
                axes[1, 1].scatter(X_pca[noise_mask, 0], X_pca[noise_mask, 1], 
                                 c='black', label='噪声点', alpha=0.7, marker='x')
            
            axes[1, 1].set_title(f'PCA可视化 (解释方差: {pca.explained_variance_ratio_.sum():.3f})')
            axes[1, 1].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.3f})')
            axes[1, 1].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.3f})')
            axes[1, 1].legend()
            axes[1, 1].grid(True, alpha=0.3)
        else:
            axes[1, 1].text(0.5, 0.5, '特征维度≤2\n无需PCA降维', 
                           transform=axes[1, 1].transAxes, ha='center', va='center')
            axes[1, 1].set_title('PCA可视化')
        
        # Textual metrics panel
        metrics_text = f"聚类评估指标:\n\n"
        if silhouette_avg is not None:
            metrics_text += f"轮廓系数: {silhouette_avg:.3f}\n"
        if calinski_harabasz is not None:
            metrics_text += f"Calinski-Harabasz: {calinski_harabasz:.3f}\n"
        if davies_bouldin is not None:
            metrics_text += f"Davies-Bouldin: {davies_bouldin:.3f}\n"
        metrics_text += f"\n聚类数量: {n_clusters}\n"
        metrics_text += f"样本总数: {n_samples}\n"
        if noise_count > 0:
            metrics_text += f"噪声点: {noise_count}\n"
        metrics_text += f"特征维度: {X_for_metrics.shape[1]}"
        
        axes[2, 0].text(0.1, 0.5, metrics_text, transform=axes[2, 0].transAxes, 
                        fontsize=12, verticalalignment='center',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        axes[2, 0].set_title('聚类评估指标')
        axes[2, 0].axis('off')
        
        # Mean intra-cluster distance per cluster
        if len(cluster_info) > 0:
            cluster_ids = list(range(n_clusters))
            intra_distances = [info['mean_intra_distance'] for info in cluster_info]
            
            bars = axes[2, 1].bar(cluster_ids, intra_distances, color=colors[:n_clusters])
            axes[2, 1].set_title('聚类内平均距离')
            axes[2, 1].set_xlabel('聚类ID')
            axes[2, 1].set_ylabel('平均距离')
            axes[2, 1].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        return {
            "method": method,
            "distance_metric": distance_metric,
            "feature_extraction": feature_extraction,
            "n_clusters": n_clusters,
            "auto_selected_k": auto_select_k,
            "labels": labels.tolist(),
            "cluster_centers": [center.tolist() for center in cluster_centers],
            "cluster_info": cluster_info,
            "noise_count": noise_count,
            "silhouette_score": float(silhouette_avg) if silhouette_avg is not None else None,
            "calinski_harabasz_score": float(calinski_harabasz) if calinski_harabasz is not None else None,
            "davies_bouldin_score": float(davies_bouldin) if davies_bouldin is not None else None,
            "feature_dimensions": X_for_metrics.shape[1],
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"时间序列聚类错误: {str(e)}"}


def detect_seasonality(data):
    """Detect the dominant seasonal period of a time series.

    Strategy: take the strongest positive-frequency FFT peak of the
    mean-centred series; if that yields no plausible period, fall back to
    the first autocorrelation peak; as a last resort return a heuristic
    default of min(24, n // 4).

    Args:
        data: 1-D numeric array-like (list or ndarray).

    Returns:
        Detected period as an int, or None when the series is too short
        (< 20 points) or cannot be converted to a numeric array.
    """
    # Convert up front so plain lists work (the original crashed on
    # `data - np.mean(data)` for list input and silently hit the fallback).
    try:
        arr = np.asarray(data, dtype=float)
    except Exception:
        return None
    # n is bound before the try below, so the fallback in `except` can never
    # raise NameError (the original referenced n inside a bare except handler).
    n = len(arr)
    if n < 20:
        return None
    try:
        from scipy.fft import fft
        from scipy.signal import find_peaks
        
        # FFT of the mean-centred series reveals the dominant frequency
        fft_vals = np.abs(fft(arr - np.mean(arr)))
        freqs = np.fft.fftfreq(n)
        
        # Peaks among the positive-frequency half of the spectrum
        peaks, _ = find_peaks(fft_vals[:n//2], height=np.max(fft_vals) * 0.1)
        
        if len(peaks) > 0:
            # Strongest peak wins
            main_freq_idx = peaks[np.argmax(fft_vals[peaks])]
            if freqs[main_freq_idx] > 0:
                # round() instead of int(): 1/freq can land just below the true
                # period (e.g. 19.999... for a period of 20), and truncation
                # would report an off-by-one period.
                period = int(round(1 / freqs[main_freq_idx]))
                # Sanity check: the period must fit several times into the series
                if 3 <= period <= n // 3:
                    return period
        
        # Fallback: first significant autocorrelation peak
        autocorr = np.correlate(arr, arr, mode='full')
        autocorr = autocorr[autocorr.size // 2:]
        
        peaks, _ = find_peaks(autocorr[1:], height=np.max(autocorr) * 0.3)
        if len(peaks) > 0:
            period = int(peaks[0] + 1)
            if 3 <= period <= n // 3:
                return period
        
        return min(24, n // 4)  # heuristic default period
    except Exception:
        return min(24, n // 4)


def estimate_contamination_rate(data):
    """Estimate the fraction of outliers in a series.

    Averages two robust estimates — Tukey's IQR fence rule and the modified
    Z-score (median/MAD) rule — then clamps the result to [0.01, 0.3] so
    downstream anomaly detectors always receive a usable contamination rate.

    Args:
        data: 1-D numeric array-like (list or ndarray).

    Returns:
        Estimated contamination rate as a Python float in [0.01, 0.3];
        0.1 (a conservative default) if estimation fails.
    """
    try:
        # Accept plain lists: the original's elementwise comparisons raised
        # TypeError on list input and silently fell back to 0.1.
        arr = np.asarray(data, dtype=float)
        
        # Estimate 1: Tukey's IQR fences
        q1, q3 = np.percentile(arr, [25, 75])
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr
        
        outliers_iqr = np.sum((arr < lower_bound) | (arr > upper_bound))
        contamination_iqr = outliers_iqr / len(arr)
        
        # Estimate 2: modified Z-score based on median/MAD (robust to outliers)
        median = np.median(arr)
        mad = np.median(np.abs(arr - median))
        modified_z_scores = 0.6745 * (arr - median) / (mad + 1e-8)  # epsilon avoids /0
        outliers_z = np.sum(np.abs(modified_z_scores) > 3.5)
        contamination_z = outliers_z / len(arr)
        
        # Average the two estimates; clamp to a sane range; float() keeps the
        # result JSON-serializable (not np.float64)
        estimated_contamination = (contamination_iqr + contamination_z) / 2
        return float(max(0.01, min(0.3, estimated_contamination)))
    except Exception:
        # Conservative default when estimation is impossible (e.g. empty input)
        return 0.1


def assess_data_quality(data):
    """Assess the quality of a univariate numeric series.

    Computes per-issue metrics (missing/infinite value ratios, variance
    collapse, skewness, kurtosis, estimated outlier ratio) plus an aggregate
    'overall_score' in [0, 1] obtained by subtracting a fixed penalty for
    each detected problem.

    Args:
        data: 1-D numeric array-like (list or ndarray).

    Returns:
        Dict of quality metrics including 'overall_score'; a fallback dict
        with score 0.5 if the assessment itself fails.
    """
    try:
        arr = np.asarray(data, dtype=float)
        quality_metrics = {
            # float()/bool() conversions keep the dict JSON-serializable
            # (np.float64 / np.bool_ are not accepted by the default encoder)
            'missing_values': float(np.sum(np.isnan(arr)) / len(arr)),
            'infinite_values': float(np.sum(np.isinf(arr)) / len(arr)),
            'zero_variance': bool(np.var(arr) < 1e-10),
            'skewness': float(abs(stats.skew(arr))),
            'kurtosis': float(abs(stats.kurtosis(arr))),
            'outlier_ratio': estimate_contamination_rate(arr)
        }
        
        # Aggregate score: start at 1.0 and subtract a fixed penalty per issue
        quality_score = 1.0
        if quality_metrics['missing_values'] > 0.1:
            quality_score -= 0.3
        if quality_metrics['infinite_values'] > 0:
            quality_score -= 0.2
        if quality_metrics['zero_variance']:
            quality_score -= 0.4
        if quality_metrics['skewness'] > 2:
            quality_score -= 0.1
        if quality_metrics['outlier_ratio'] > 0.2:
            quality_score -= 0.1
        
        quality_metrics['overall_score'] = max(0, quality_score)
        return quality_metrics
    except Exception:
        # Fix: narrowed the original bare `except:`; keep the documented fallback
        return {'overall_score': 0.5, 'error': 'Unable to assess data quality'}


@mcp.tool()
def anomaly_detection(data: List[float], method: str = 'isolation_forest', 
                     contamination: float = 0.1, window_size: int = 10,
                     seasonal_period: int = None, sensitivity: float = 2.5,
                     auto_tune: bool = True, confidence_level: float = 0.95) -> Dict[str, Any]:
    """
    智能增强时间序列异常检测 - 完善版
    
    Args:
        data: 时间序列数据
        method: 异常检测方法 ('isolation_forest', 'statistical', 'moving_average', 'seasonal', 'ensemble', 'lstm_autoencoder', 'robust_covariance')
        contamination: 异常值比例
        window_size: 滑动窗口大小
        seasonal_period: 季节性周期（用于季节性异常检测）
        sensitivity: 敏感度参数（用于统计方法）
        auto_tune: 是否自动调优参数
        confidence_level: 置信水平
    
    Returns:
        异常检测结果和可视化
    """
    try:
        ts_data = np.array(data)
        n_points = len(ts_data)
        
        # 数据预处理和验证
        if n_points < 10:
            return {"error": "数据点数量过少，至少需要10个数据点"}
        
        # 智能参数调优
        if auto_tune:
            # 自动调整窗口大小
            if window_size is None or window_size <= 0:
                window_size = min(max(10, n_points // 20), 50)
            
            # 自动检测季节性周期
            if seasonal_period is None and method in ['seasonal', 'ensemble']:
                seasonal_period = detect_seasonality(ts_data)
            
            # 自动调整污染率
            if contamination == 0.1:  # 默认值
                contamination = estimate_contamination_rate(ts_data)
        
        # 数据质量检查
        data_quality = assess_data_quality(ts_data)
        
        if method == 'isolation_forest':
            try:
                from sklearn.ensemble import IsolationForest
                from scipy.stats import skew, kurtosis
            except ImportError:
                return {"error": "scikit-learn库未安装，无法使用Isolation Forest"}
            
            # 创建增强特征矩阵
            features = []
            for i in range(window_size, n_points):
                window = ts_data[i-window_size:i]
                feature_vector = [
                    np.mean(window),
                    np.std(window),
                    np.min(window),
                    np.max(window),
                    ts_data[i],
                    ts_data[i] - np.mean(window),  # 与窗口均值的差异
                    (ts_data[i] - np.mean(window)) / (np.std(window) + 1e-8),  # 标准化差异
                    skew(window),  # 偏度
                    kurtosis(window),  # 峰度
                    np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0,  # 上升趋势比例
                ]
                features.append(feature_vector)
            
            X = np.array(features)
            
            # 训练Isolation Forest
            clf = IsolationForest(contamination=contamination, random_state=42, n_estimators=200)
            anomaly_labels = clf.fit_predict(X)
            
            # 创建完整的异常标签数组
            full_anomaly_labels = np.ones(n_points)
            full_anomaly_labels[window_size:] = anomaly_labels
            
            anomaly_scores = np.ones(n_points)
            anomaly_scores[window_size:] = clf.decision_function(X)
            
        elif method == 'statistical':
            # 增强的统计异常检测
            # 使用修正的Z-score（基于中位数）
            median = np.median(ts_data)
            mad = np.median(np.abs(ts_data - median))  # 中位数绝对偏差
            modified_z_scores = 0.6745 * (ts_data - median) / (mad + 1e-8)
            
            # 结合标准Z-score
            z_scores = np.abs((ts_data - np.mean(ts_data)) / (np.std(ts_data) + 1e-8))
            
            # 综合异常分数
            combined_scores = np.maximum(np.abs(modified_z_scores), z_scores)
            
            full_anomaly_labels = np.where(combined_scores > sensitivity, -1, 1)
            anomaly_scores = -combined_scores  # 负值表示更异常
            
        elif method == 'moving_average':
            # 增强的移动平均异常检测
            # 使用指数移动平均
            alpha = 2.0 / (window_size + 1)
            ema = np.zeros(n_points)
            ema[0] = ts_data[0]
            
            for i in range(1, n_points):
                ema[i] = alpha * ts_data[i] + (1 - alpha) * ema[i-1]
            
            # 计算移动标准差
            moving_std = np.array([np.std(ts_data[max(0, i-window_size//2):min(n_points, i+window_size//2+1)]) 
                                  for i in range(n_points)])
            
            # 计算偏差
            deviations = np.abs(ts_data - ema)
            threshold = sensitivity * moving_std
            
            full_anomaly_labels = np.where(deviations > threshold, -1, 1)
            anomaly_scores = -deviations / (moving_std + 1e-8)  # 标准化偏差
            
        elif method == 'seasonal':
            # 季节性异常检测
            if seasonal_period is None:
                seasonal_period = min(24, n_points // 4)  # 默认季节周期
            
            if seasonal_period >= n_points:
                return {"error": "季节周期过大，无法进行季节性异常检测"}
            
            # 计算季节性基线
            seasonal_baseline = np.zeros(n_points)
            seasonal_std = np.zeros(n_points)
            
            for i in range(n_points):
                # 获取同一季节位置的历史数据
                season_indices = list(range(i % seasonal_period, n_points, seasonal_period))
                season_data = np.array([ts_data[idx] for idx in season_indices])
                
                seasonal_baseline[i] = np.median(season_data)
                seasonal_std[i] = np.std(season_data) if len(season_data) > 1 else np.std(ts_data)
            
            # 计算季节性偏差
            seasonal_deviations = np.abs(ts_data - seasonal_baseline)
            threshold = sensitivity * seasonal_std
            
            full_anomaly_labels = np.where(seasonal_deviations > threshold, -1, 1)
            anomaly_scores = -seasonal_deviations / (seasonal_std + 1e-8)
            
        elif method == 'robust_covariance':
            # 鲁棒协方差异常检测
            try:
                from sklearn.covariance import EllipticEnvelope
                
                # 创建特征矩阵
                features = []
                for i in range(window_size, n_points):
                    window = ts_data[i-window_size:i]
                    feature_vector = [
                        np.mean(window),
                        np.std(window),
                        np.median(window),
                        np.percentile(window, 25),
                        np.percentile(window, 75),
                        ts_data[i],
                        ts_data[i] - np.mean(window),
                        np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0
                    ]
                    features.append(feature_vector)
                
                X = np.array(features)
                
                # 鲁棒协方差估计
                robust_cov = EllipticEnvelope(contamination=contamination, random_state=42)
                anomaly_labels = robust_cov.fit_predict(X)
                
                full_anomaly_labels = np.ones(n_points)
                full_anomaly_labels[window_size:] = anomaly_labels
                
                anomaly_scores = np.ones(n_points)
                anomaly_scores[window_size:] = robust_cov.decision_function(X)
                
            except ImportError:
                return {"error": "scikit-learn库未安装，无法使用鲁棒协方差方法"}
        
        elif method == 'lstm_autoencoder':
            # "LSTM autoencoder" anomaly detection (simplified).
            # NOTE(review): despite the name, no LSTM is used — this is PCA
            # reconstruction error on sliding windows.
            try:
                # Build overlapping sliding-window sequences as samples.
                sequences = []
                for i in range(window_size, n_points):
                    sequences.append(ts_data[i-window_size:i])
                
                sequences = np.array(sequences)
                
                # Simplified autoencoder: PCA reduce-then-reconstruct.
                from sklearn.decomposition import PCA
                
                # Standardize each window position before PCA.
                # (StandardScaler is presumably imported at module level — confirm.)
                scaler = StandardScaler()
                sequences_scaled = scaler.fit_transform(sequences)
                
                # Keep ~window_size/3 components (at least 2, at most
                # n_features - 1) so the reconstruction is lossy.
                n_components = min(max(2, window_size // 3), sequences_scaled.shape[1] - 1)
                pca = PCA(n_components=n_components)
                sequences_reduced = pca.fit_transform(sequences_scaled)
                sequences_reconstructed = pca.inverse_transform(sequences_reduced)
                
                # Mean squared reconstruction error per sequence.
                reconstruction_errors = np.mean((sequences_scaled - sequences_reconstructed) ** 2, axis=1)
                
                # Errors in the top `contamination` fraction are anomalies.
                threshold = np.percentile(reconstruction_errors, (1 - contamination) * 100)
                
                # First `window_size` points default to normal (no window).
                full_anomaly_labels = np.ones(n_points)
                full_anomaly_labels[window_size:] = np.where(reconstruction_errors > threshold, -1, 1)
                
                anomaly_scores = np.ones(n_points)
                anomaly_scores[window_size:] = -reconstruction_errors  # negated so lower = more anomalous
                
            except Exception as e:
                return {"error": f"LSTM自编码器方法失败: {str(e)}"}
        
        elif method == 'ensemble':
            # Ensemble anomaly detection: run several detectors and combine
            # their scores and labels.  The sklearn-based detectors are only
            # included when the corresponding import succeeds.
            methods = ['statistical', 'moving_average']
            try:
                from sklearn.ensemble import IsolationForest
                methods.append('isolation_forest')
            except ImportError:
                pass
            
            try:
                from sklearn.covariance import EllipticEnvelope
                methods.append('robust_covariance')
            except ImportError:
                pass
            
            ensemble_scores = []
            ensemble_labels = []
            
            for sub_method in methods:
                # Each sub-method is inlined here to avoid recursion.
                if sub_method == 'statistical':
                    # Statistical detector: max of MAD-based modified z-score
                    # (robust to outliers) and the ordinary z-score.
                    median = np.median(ts_data)
                    mad = np.median(np.abs(ts_data - median))
                    modified_z_scores = 0.6745 * (ts_data - median) / (mad + 1e-8)
                    z_scores = np.abs((ts_data - np.mean(ts_data)) / (np.std(ts_data) + 1e-8))
                    combined_scores = np.maximum(np.abs(modified_z_scores), z_scores)
                    sub_labels = np.where(combined_scores > sensitivity, -1, 1)
                    sub_scores = -combined_scores
                elif sub_method == 'moving_average':
                    # EMA detector: flag points whose deviation from an
                    # exponential moving average exceeds sensitivity * local std.
                    alpha = 2.0 / (window_size + 1)
                    ema = np.zeros(n_points)
                    ema[0] = ts_data[0]
                    for i in range(1, n_points):
                        ema[i] = alpha * ts_data[i] + (1 - alpha) * ema[i-1]
                    # Rolling std over a centered window, clipped at the edges.
                    moving_std = np.array([np.std(ts_data[max(0, i-window_size//2):min(n_points, i+window_size//2+1)]) 
                                          for i in range(n_points)])
                    deviations = np.abs(ts_data - ema)
                    threshold = sensitivity * moving_std
                    sub_labels = np.where(deviations > threshold, -1, 1)
                    sub_scores = -deviations / (moving_std + 1e-8)
                elif sub_method == 'isolation_forest':
                    # Isolation Forest detector on windowed summary features.
                    from sklearn.ensemble import IsolationForest
                    from scipy.stats import skew, kurtosis
                    features = []
                    for i in range(window_size, n_points):
                        window = ts_data[i-window_size:i]
                        feature_vector = [
                            np.mean(window), np.std(window), np.min(window), np.max(window),
                            ts_data[i], ts_data[i] - np.mean(window),
                            (ts_data[i] - np.mean(window)) / (np.std(window) + 1e-8),
                            skew(window), kurtosis(window),
                            np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0,
                        ]
                        features.append(feature_vector)
                    X = np.array(features)
                    clf = IsolationForest(contamination=contamination, random_state=42, n_estimators=200)
                    anomaly_labels_sub = clf.fit_predict(X)
                    # First window_size points default to normal (no features).
                    sub_labels = np.ones(n_points)
                    sub_labels[window_size:] = anomaly_labels_sub
                    sub_scores = np.ones(n_points)
                    sub_scores[window_size:] = clf.decision_function(X)
                elif sub_method == 'robust_covariance':
                    # Robust-covariance detector (EllipticEnvelope) on windowed
                    # summary features; same padding convention as above.
                    from sklearn.covariance import EllipticEnvelope
                    features = []
                    for i in range(window_size, n_points):
                        window = ts_data[i-window_size:i]
                        feature_vector = [
                            np.mean(window), np.std(window), np.median(window),
                            np.percentile(window, 25), np.percentile(window, 75),
                            ts_data[i], ts_data[i] - np.mean(window),
                            np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0
                        ]
                        features.append(feature_vector)
                    X = np.array(features)
                    robust_cov = EllipticEnvelope(contamination=contamination, random_state=42)
                    anomaly_labels_sub = robust_cov.fit_predict(X)
                    sub_labels = np.ones(n_points)
                    sub_labels[window_size:] = anomaly_labels_sub
                    sub_scores = np.ones(n_points)
                    sub_scores[window_size:] = robust_cov.decision_function(X)
                
                ensemble_scores.append(sub_scores)
                ensemble_labels.append(sub_labels)
            
            if not ensemble_scores:
                return {"error": "集成方法中所有子方法都失败"}
            
            # Average the per-method scores.
            # NOTE(review): sub-scores are on different scales (negated
            # z-scores vs sklearn decision_function values), so an unweighted
            # mean is a rough combination — consider rank-normalizing first.
            anomaly_scores = np.mean(ensemble_scores, axis=0)
            
            # Majority vote on the -1/1 labels: anomalous when strictly more
            # than half of the sub-methods flag the point.
            ensemble_labels = np.array(ensemble_labels)
            vote_counts = np.sum(ensemble_labels == -1, axis=0)
            full_anomaly_labels = np.where(vote_counts >= len(methods) // 2 + 1, -1, 1)
            
        else:
            return {"error": f"不支持的异常检测方法: {method}"}
        
        # Split indices by label (-1 = anomaly, 1 = normal).
        anomaly_indices = np.where(full_anomaly_labels == -1)[0]
        normal_indices = np.where(full_anomaly_labels == 1)[0]
        
        # Percentiles (25/50/75/90) of |score| over the anomalies, used below
        # to bucket anomalies into mild / moderate / severe.
        if len(anomaly_indices) > 0:
            anomaly_severity = np.abs(anomaly_scores[anomaly_indices])
            severity_percentiles = np.percentile(anomaly_severity, [25, 50, 75, 90])
        else:
            severity_percentiles = np.array([0, 0, 0, 0])
        
        # Build the 3x2 summary figure.
        fig, axes = plt.subplots(3, 2, figsize=(20, 16))
        fig.suptitle(f'智能时间序列异常检测 - 完善版\n方法: {method} | 数据质量: {data_quality.get("overall_score", 0):.2f}', fontsize=16)
        
        # Panel (0,0): raw series with anomalies colored/sized by severity.
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8, color='blue')
        if len(anomaly_indices) > 0:
            # Severity buckets by |score|: <= median -> mild,
            # <= 75th percentile -> moderate, else severe.
            # NOTE(review): severity_colors is never read — literals are used below.
            severity_colors = ['orange', 'red', 'darkred']
            for i, idx in enumerate(anomaly_indices):
                severity = np.abs(anomaly_scores[idx])
                if severity <= severity_percentiles[1]:  # mild anomaly
                    color = 'orange'
                    size = 30
                elif severity <= severity_percentiles[2]:  # moderate anomaly
                    color = 'red'
                    size = 50
                else:  # severe anomaly
                    color = 'darkred'
                    size = 70
                
                axes[0, 0].scatter(idx, ts_data[idx], color=color, s=size, 
                                 alpha=0.8, zorder=5, edgecolors='black', linewidth=0.5)
            
            # Empty scatters exist solely to populate the legend.
            axes[0, 0].scatter([], [], color='orange', s=30, label='轻微异常', alpha=0.8)
            axes[0, 0].scatter([], [], color='red', s=50, label='中等异常', alpha=0.8)
            axes[0, 0].scatter([], [], color='darkred', s=70, label='严重异常', alpha=0.8)
        
        axes[0, 0].set_title(f'时间序列数据与异常点 (共{len(anomaly_indices)}个)')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Panel (0,1): anomaly-score series with the contamination threshold.
        axes[0, 1].plot(anomaly_scores, label='异常分数', linewidth=2, color='orange')
        # NOTE(review): every method above emits "lower = more anomalous"
        # scores, so the cutoff arguably should be the low percentile
        # (np.percentile(anomaly_scores, contamination*100)); with
        # (1-contamination)*100 the shaded region below the line covers most
        # of the series — verify intent before changing the returned value.
        threshold_line = np.percentile(anomaly_scores, (1-contamination)*100)
        axes[0, 1].axhline(y=threshold_line, color='red', linestyle='--', 
                          label=f'{contamination*100}%阈值 ({threshold_line:.3f})')
        axes[0, 1].fill_between(range(n_points), anomaly_scores, threshold_line, 
                               where=(anomaly_scores < threshold_line), 
                               color='red', alpha=0.3, label='异常区域')
        axes[0, 1].set_title('异常分数时间序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('异常分数')
        axes[0, 1].legend()
        axes[0, 1].grid(True, alpha=0.3)
        
        # Panel (1,0): histogram of anomaly scores with the threshold marked.
        axes[1, 0].hist(anomaly_scores, bins=50, alpha=0.7, color='skyblue', edgecolor='black')
        axes[1, 0].axvline(threshold_line, color='red', linestyle='--', linewidth=2, 
                          label=f'阈值 ({threshold_line:.3f})')
        axes[1, 0].set_title('异常分数分布')
        axes[1, 0].set_xlabel('异常分数')
        axes[1, 0].set_ylabel('频次')
        axes[1, 0].legend()
        axes[1, 0].grid(True, alpha=0.3)
        
        # Panel (1,1): bar chart of normal vs. anomaly counts.
        stats_data = {
            '正常点': len(normal_indices),
            '异常点': len(anomaly_indices)
        }
        
        bars = axes[1, 1].bar(stats_data.keys(), stats_data.values(), 
                             color=['green', 'red'], alpha=0.7)
        axes[1, 1].set_title('异常检测统计')
        axes[1, 1].set_ylabel('数量')
        
        # Panel (2,0): data-quality indicators (skewness/kurtosis squashed
        # into [0, 1] with ad-hoc divisors 5 and 10).
        quality_metrics = ['缺失值', '无穷值', '偏度', '峰度', '异常率']
        quality_values = [
            data_quality.get('missing_values', 0),
            data_quality.get('infinite_values', 0),
            min(data_quality.get('skewness', 0) / 5, 1),  # normalized
            min(data_quality.get('kurtosis', 0) / 10, 1),  # normalized
            data_quality.get('outlier_ratio', 0)
        ]
        
        bars = axes[2, 0].bar(quality_metrics, quality_values, 
                             color=['blue', 'orange', 'green', 'purple', 'red'], alpha=0.7)
        axes[2, 0].set_title(f'数据质量评估 (总分: {data_quality.get("overall_score", 0):.2f})')
        axes[2, 0].set_ylabel('指标值')
        axes[2, 0].tick_params(axis='x', rotation=45)
        axes[2, 0].set_ylim(0, 1)
        
        # Panel (2,1): free-text analysis report rendered into the axes.
        # (The report string is user-facing runtime output — left untranslated.)
        analysis_text = f"""智能分析结果:

数据特征:
• 总数据点: {n_points}
• 检测到的季节周期: {seasonal_period if seasonal_period else '无'}
• 自动调优污染率: {contamination:.3f}
• 优化窗口大小: {window_size}

异常检测结果:
• 异常点数量: {len(anomaly_indices)}
• 异常比例: {len(anomaly_indices)/n_points*100:.2f}%
• 置信水平: {confidence_level*100:.1f}%

严重程度分布:
• 轻微异常: {np.sum(np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[1]) if len(anomaly_indices) > 0 else 0}
• 中等异常: {np.sum((np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[1]) & (np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[2])) if len(anomaly_indices) > 0 else 0}
• 严重异常: {np.sum(np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[2]) if len(anomaly_indices) > 0 else 0}

数据质量:
• 整体质量分数: {data_quality.get('overall_score', 0):.3f}
• 数据偏度: {data_quality.get('skewness', 0):.3f}
• 数据峰度: {data_quality.get('kurtosis', 0):.3f}"""
        
        axes[2, 1].text(0.05, 0.95, analysis_text, transform=axes[2, 1].transAxes, 
                        fontsize=10, verticalalignment='top',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        axes[2, 1].set_title('智能分析报告')
        axes[2, 1].axis('off')
        
        plt.tight_layout()
        # Encode the figure as a base64 string; create_plot_base64 is a helper
        # defined elsewhere in this file (presumably closes the figure — confirm).
        plot_base64 = create_plot_base64(fig)
        
        # Per-anomaly detail records: index, value, raw score, and a severity
        # bucket derived from the |score| percentiles computed above.
        anomaly_details = []
        for idx in anomaly_indices:
            severity = np.abs(anomaly_scores[idx])
            if severity <= severity_percentiles[1]:
                level = '轻微'
            elif severity <= severity_percentiles[2]:
                level = '中等'
            else:
                level = '严重'
            
            # Cast numpy scalars to native types for JSON serialization.
            anomaly_details.append({
                'index': int(idx),
                'value': float(ts_data[idx]),
                'score': float(anomaly_scores[idx]),
                'severity': level
            })
        
        # Assemble the JSON-serializable result payload.  All numpy scalars
        # and arrays are converted to native Python types here.
        return {
            "method": method,
            "total_points": n_points,
            "anomaly_count": len(anomaly_indices),
            "anomaly_ratio": float(len(anomaly_indices) / n_points),
            "anomaly_indices": [int(x) for x in anomaly_indices],
            "anomaly_scores": [float(x) for x in anomaly_scores],
            "anomaly_labels": [int(x) for x in full_anomaly_labels],
            "anomaly_details": anomaly_details,
            "severity_percentiles": [float(x) for x in severity_percentiles],
            "contamination": float(contamination),
            "window_size": int(window_size),
            "sensitivity": float(sensitivity),
            # NOTE(review): this is the visualization percentile cutoff, not the
            # per-method detection threshold — see the panel (0,1) note.
            "threshold": float(threshold_line),
            "confidence_level": float(confidence_level),
            "auto_tuned": auto_tune,
            "detected_seasonal_period": seasonal_period,
            "data_quality": {
                "overall_score": float(data_quality.get('overall_score', 0)),
                "missing_values_ratio": float(data_quality.get('missing_values', 0)),
                "infinite_values_ratio": float(data_quality.get('infinite_values', 0)),
                "skewness": float(data_quality.get('skewness', 0)),
                "kurtosis": float(data_quality.get('kurtosis', 0)),
                "estimated_outlier_ratio": float(data_quality.get('outlier_ratio', 0))
            },
            # Same mild/moderate/severe bucketing as the report panel.
            "severity_distribution": {
                "mild": int(np.sum(np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[1]) if len(anomaly_indices) > 0 else 0),
                "moderate": int(np.sum((np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[1]) & (np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[2])) if len(anomaly_indices) > 0 else 0),
                "severe": int(np.sum(np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[2]) if len(anomaly_indices) > 0 else 0)
            },
            "intelligent_analysis": {
                "auto_optimized_contamination": float(contamination),
                "auto_optimized_window_size": int(window_size),
                "seasonality_detected": seasonal_period is not None,
                "data_quality_score": float(data_quality.get('overall_score', 0)),
                "recommended_method": method,
                # Heuristic confidence: quality score discounted by anomaly rate.
                "confidence_score": float(min(1.0, data_quality.get('overall_score', 0) * (1 - len(anomaly_indices) / n_points * 2)))
            },
            "visualization": plot_base64
        }
        
    except Exception as e:
        # Catch-all for the whole tool body: return the full traceback in the
        # error payload so the MCP client can surface the failure details.
        import traceback
        error_details = traceback.format_exc()
        return {"error": f"异常检测错误: {str(e)}\n详细错误信息:\n{error_details}"}