#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.stdout.reconfigure(encoding='utf-8')

from mcp.server.fastmcp import FastMCP
import asyncio
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import base64
import io
import json
import warnings
import logging
from typing import Union, List, Dict, Any, Tuple, Optional
from pathlib import Path
import types
from dataclasses import dataclass
from enum import Enum
import sympy as sp
from scipy.interpolate import CubicSpline, Rbf, BarycentricInterpolator
from sklearn.linear_model import Ridge, Lasso
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

# 科学计算库
import scipy.optimize as opt
import scipy.interpolate as interp
import scipy.integrate as integrate
from scipy.stats import jarque_bera, shapiro

# 机器学习库
try:
    from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
    from sklearn.svm import SVC
    from sklearn.ensemble import RandomForestClassifier, IsolationForest
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import GaussianNB
    from sklearn.metrics import (
        accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, 
        silhouette_score, calinski_harabasz_score, classification_report,
        roc_auc_score, roc_curve
    )
    from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
    pass  # sklearn库可用
except ImportError:
    pass  # sklearn库不可用，在需要时进行检查
    print("警告：sklearn库未安装，机器学习功能将受限")

# 导入配置文件，确保中文字体正确显示
try:
    from config import *
    print("✓ 中文字体配置已加载")
except ImportError:
    print("⚠ 配置文件未找到，使用默认设置")
    # 基本的中文字体配置
    plt.rcParams['font.sans-serif'] = ['Microsoft YaHei', 'SimHei', 'SimSun']
    plt.rcParams['axes.unicode_minus'] = False

# 导入统一可视化模块
try:
    from unified_visualization import UnifiedVisualizer
    unified_viz = UnifiedVisualizer()
    print("✓ 统一可视化模块已加载")
except ImportError:
    print("⚠ 统一可视化模块未找到，使用内置可视化功能")
    unified_viz = None
try:
    from sklearn.model_selection import train_test_split
    from sklearn.feature_selection import (
        mutual_info_classif, chi2, f_classif, SelectKBest, RFE
    )
except ImportError:
    pass  # sklearn库不可用

# 时间序列分析库
try:
    import statsmodels.api as sm
    from statsmodels.tsa.arima.model import ARIMA
    from statsmodels.tsa.ar_model import AutoReg
    from statsmodels.tsa.stattools import adfuller, acf, pacf
    from statsmodels.tsa.seasonal import seasonal_decompose
    from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
    STATSMODELS_AVAILABLE = True
except ImportError:
    STATSMODELS_AVAILABLE = False
    print("警告：statsmodels库未安装，时间序列分析功能将受限")

# 导入增强版时间序列分析模块
try:
    from enhanced_time_series import EnhancedTimeSeriesAnalyzer, get_enhanced_analyzer
    enhanced_ts_analyzer = get_enhanced_analyzer()
    print("✓ 增强版时间序列分析模块已加载 - 支持百万级别数据处理")
except ImportError:
    enhanced_ts_analyzer = None
    print("⚠ 增强版时间序列分析模块未找到，使用标准功能")

# 可视化库
import plotly.graph_objects as go
import plotly.express as px

warnings.filterwarnings('ignore')

# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

import asyncio
import json
import logging
from typing import Any, Dict, List, Optional, Union, Tuple
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
try:
    from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
    from sklearn.metrics import silhouette_score, silhouette_samples, calinski_harabasz_score, davies_bouldin_score
except ImportError:
    pass  # sklearn库不可用
from scipy.cluster.hierarchy import dendrogram, linkage
import base64
from io import BytesIO
import mcp

# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

def create_plot_base64(fig):
    """Render a matplotlib figure as a base64-encoded PNG data URI.

    The figure is closed after rendering to free its memory.
    """
    with BytesIO() as png_buffer:
        fig.savefig(png_buffer, format='png', dpi=300, bbox_inches='tight')
        encoded = base64.b64encode(png_buffer.getvalue()).decode('utf-8')
    plt.close(fig)
    return f"data:image/png;base64,{encoded}"

# 辅助函数定义
def format_friendly_expression(symbolic_str):
    """Turn a symbolic expression string into a reader-friendly f(x) form.

    Replaces '**' with '^' and '*' with a middle dot. Returns a Chinese
    failure message when the input is empty or marked as a failed expression.
    """
    try:
        text = str(symbolic_str)
        if not symbolic_str or "符号表达式生成失败" in text:
            return "表达式生成失败"
        # '**' must be handled before '*' so exponents are not split in two.
        friendly = text.replace('**', '^').replace('*', '·')
        return f"f(x) = {friendly}"
    except Exception as e:
        return f"表达式格式化失败: {str(e)}"

def safe_file_path(file_path):
    """Sanitize a user-supplied file path.

    Normalizes the path, replaces characters that are invalid in file names
    with underscores, and neutralizes '..' parent-directory segments.
    Returns None for an empty input; on internal failure the original
    path is returned unchanged.
    """
    try:
        import os
        import re

        if not file_path:
            return None

        sanitized = os.path.normpath(file_path)
        # Strip characters that are illegal in file names on common platforms.
        sanitized = re.sub(r'[<>:"|?*]', '_', sanitized)
        # Neutralize any remaining parent-directory traversal.
        return sanitized.replace('..', '_') if '..' in sanitized else sanitized
    except Exception as e:
        print(f"路径安全检查失败: {e}")
        return file_path

def ensure_utf8_encoding(text):
    """Coerce an arbitrary value to a str.

    Bytes are decoded as UTF-8 with undecodable bytes dropped; strings pass
    through unchanged; anything else goes through str().
    """
    try:
        if isinstance(text, bytes):
            return text.decode('utf-8', errors='ignore')
        if isinstance(text, str):
            return text
        return str(text)
    except Exception as e:
        print(f"编码转换失败: {e}")
        return str(text)

# ==================== Module 1: basic arithmetic ====================
# Create the MCP server instance; every @mcp.tool() function below registers on it.
# NOTE(review): this rebinds the name `mcp`, shadowing the `import mcp` module
# imported earlier in this file — the module object becomes unreachable here.
mcp = FastMCP("Math-Server")

@mcp.tool()
def arithmetic_operation(operation: str, values: List[float]) -> Union[float, str]:
    """
    Perform a basic arithmetic operation.

    Args:
        operation: one of add, subtract, multiply, divide, power, sqrt
        values: operand list (1 or 2+ operands depending on the operation)

    Returns:
        The numeric result, or a Chinese error string on invalid input.
    """
    try:
        if operation == "add":
            if len(values) < 2:
                return "错误：加法需要至少两个操作数"
            return sum(values)

        if operation == "subtract":
            if len(values) < 2:
                return "错误：减法需要至少两个操作数"
            head, *rest = values
            for term in rest:
                head -= term
            return head

        if operation == "multiply":
            if len(values) < 2:
                return "错误：乘法需要至少两个操作数"
            product = 1
            for factor in values:
                product *= factor
            return product

        if operation == "divide":
            if len(values) != 2:
                return "错误：除法需要恰好两个操作数"
            numerator, denominator = values
            if denominator == 0:
                return "错误：除数不能为零"
            return numerator / denominator

        if operation == "power":
            if len(values) != 2:
                return "错误：乘方需要恰好两个操作数"
            return math.pow(values[0], values[1])

        if operation == "sqrt":
            if len(values) != 1:
                return "错误：平方根需要恰好一个操作数"
            if values[0] < 0:
                return "错误：不能计算负数的平方根"
            return math.sqrt(values[0])

        return f"错误：未知的运算类型 '{operation}'"

    except Exception as e:
        return f"计算错误: {str(e)}"

@mcp.tool()
def calculate_expression(expression: str) -> Union[float, str]:
    """
    Evaluate a mathematical expression string.

    Args:
        expression: the expression to evaluate
        (supports +, -, *, /, **, (), sqrt, sin, cos, tan, log, exp, etc.)

    Returns:
        The numeric result as float, or a Chinese error string.
    """
    try:
        # Normalize, then screen for obviously unsafe tokens before eval.
        expr = expression.strip().lower()
        blocked = ('import', 'exec', 'eval', '__', 'open', 'file', 'input')
        for keyword in blocked:
            if keyword in expr:
                return "错误：表达式包含不安全的关键字"

        # Whitelisted math environment handed to eval as its globals.
        math_env = {
            "sqrt": math.sqrt,
            "sin": math.sin,
            "cos": math.cos,
            "tan": math.tan,
            "log": math.log,
            "log10": math.log10,
            "exp": math.exp,
            "abs": abs,
            "pow": pow,
            "pi": math.pi,
            "e": math.e,
            "ceil": math.ceil,
            "floor": math.floor,
            "round": round,
        }

        # NOTE(review): eval with builtins stripped plus the keyword screen
        # above is best-effort sandboxing, not a hardened boundary.
        value = eval(expr, {"__builtins__": None}, math_env)

        if isinstance(value, (int, float)):
            return float(value)
        return "错误：计算结果不是数字"

    except ZeroDivisionError:
        return "错误：除数不能为零"
    except NameError as e:
        return f"错误：未知的函数或变量 - {str(e)}"
    except Exception as e:
        return f"错误：无效的表达式 - {str(e)}"

@mcp.tool()
def linear_algebra_operation(operation: str, matrix: List[List[float]], 
                             vector: List[float] = None, 
                             matrix_b: List[List[float]] = None) -> Union[Dict, str]:
    """
    Perform a linear-algebra operation.

    Args:
        operation: operation type (gaussian, lu, qr, power_method, jacobi,
                   matrix_ops, svd, eigen)
        matrix: the primary matrix, given as a list of rows
        vector: right-hand-side vector b of a linear system (required by "gaussian")
        matrix_b: second matrix (used by "matrix_ops" for binary operations)

    Returns:
        A dict of results on success, or a Chinese error string on failure.
    """
    try:
        np_matrix = np.array(matrix, dtype=float)
        
        if operation == "gaussian":
            # Solve the linear system A x = b.  Despite the name, this uses
            # numpy's LAPACK-backed solver rather than hand-rolled elimination.
            if vector is None:
                return "错误：需要提供向量b"
            if len(vector) != len(matrix):
                return "错误：矩阵行数与向量长度不匹配"
            if len(matrix[0]) != len(matrix):
                return "错误：系数矩阵必须是方阵"
            
            # Solve with numpy
            solution = np.linalg.solve(np_matrix, np.array(vector))
            return {"solution": solution.tolist()}
        
        elif operation == "lu":
            # Pivoted LU decomposition: A = P L U
            if len(matrix[0]) != len(matrix):
                return "错误：矩阵必须是方阵"
            
            # scipy provides the pivoted LU factorization
            from scipy.linalg import lu
            P, L, U = lu(np_matrix)
            return {
                "P_matrix": P.tolist(),
                "L_matrix": L.tolist(),
                "U_matrix": U.tolist()
            }
        
        elif operation == "qr":
            # QR decomposition: A = Q R
            Q, R = np.linalg.qr(np_matrix)
            return {
                "Q_matrix": Q.tolist(),
                "R_matrix": R.tolist()
            }
        
        elif operation == "power_method":
            # Dominant eigenvalue.  NOTE(review): despite the name this does
            # not iterate the power method; it computes all eigenvalues with
            # numpy and returns the largest-magnitude one (real part only).
            if len(matrix[0]) != len(matrix):
                return "错误：矩阵必须是方阵"
            
            # Compute eigenvalues with numpy
            eigenvalues = np.linalg.eigvals(np_matrix)
            max_eigenvalue = max(eigenvalues, key=abs)
            return {"max_eigenvalue": float(max_eigenvalue.real)}
        
        elif operation == "jacobi":
            # Symmetric eigendecomposition (the classical Jacobi method applies
            # only to symmetric matrices; numpy's eigh is the actual solver).
            if not np.allclose(np_matrix, np_matrix.T):
                return "错误：矩阵必须是对称的"
            
            eigenvalues, eigenvectors = np.linalg.eigh(np_matrix)
            return {
                "eigenvalues": eigenvalues.tolist(),
                "eigenvectors": eigenvectors.tolist()
            }
        
        elif operation == "matrix_ops":
            # Basic matrix arithmetic
            if matrix_b is None:
                # Single-matrix operations: transpose, determinant, inverse
                if len(matrix[0]) != len(matrix):
                    return "错误：转置和求逆需要方阵"
                
                return {
                    "transpose": np_matrix.T.tolist(),
                    "determinant": float(np.linalg.det(np_matrix)),
                    "inverse": np.linalg.inv(np_matrix).tolist()
                }
            else:
                # Two-matrix operations: elementwise +/- and matrix product
                np_matrix_b = np.array(matrix_b, dtype=float)
                return {
                    "addition": (np_matrix + np_matrix_b).tolist(),
                    "subtraction": (np_matrix - np_matrix_b).tolist(),
                    "multiplication": np.dot(np_matrix, np_matrix_b).tolist()
                }
        
        elif operation == "svd":
            # Singular value decomposition: A = U diag(S) V^T
            U, S, Vt = np.linalg.svd(np_matrix)
            return {
                "U_matrix": U.tolist(),
                "singular_values": S.tolist(),
                "V_transpose": Vt.tolist()
            }
        
        elif operation == "eigen":
            # Full (possibly complex) eigendecomposition.
            # NOTE(review): complex eigenvalues serialize as Python complex in
            # the returned lists — confirm downstream JSON encoding handles that.
            if len(matrix[0]) != len(matrix):
                return "错误：矩阵必须是方阵"
            
            eigenvalues, eigenvectors = np.linalg.eig(np_matrix)
            return {
                "eigenvalues": eigenvalues.tolist(),
                "eigenvectors": eigenvectors.tolist()
            }
        
        else:
            return f"错误：未知的线性代数操作 '{operation}'"
    
    except np.linalg.LinAlgError as e:
        return f"线性代数错误: {str(e)}"
    except Exception as e:
        return f"计算错误: {str(e)}"

# ====================模块 2 优化模块  ====================
@mcp.tool()
def newton_optimization(objective_func: str, initial_point: List[float], 
                       tolerance: float = 1e-6, max_iterations: int = 100, minimize: bool = True) -> Union[Dict[str, Any], str]:
    """
    Newton's method for unconstrained optimization (scipy Newton-CG).

    Args:
        objective_func: objective expression in variables x and y
                        (supports sin, cos, exp, log, sqrt, abs, pi, e, pow)
        initial_point: starting point [x0] or [x0, y0]
        tolerance: convergence tolerance
        max_iterations: iteration cap
        minimize: True to find a minimum, False to find a maximum

    Returns:
        dict with optimal point/value and solver status, or an error string.
    """
    try:
        # Evaluation environment: builtins stripped, small math whitelist only.
        safe_globals = {
            "__builtins__": {}, "sin": math.sin, "cos": math.cos,
            "exp": math.exp, "log": math.log, "sqrt": math.sqrt,
            "abs": abs, "pi": math.pi, "e": math.e, "pow": pow
        }

        def objective(x):
            x = np.asarray(x, dtype=float)
            # BUGFIX: the previous code textually substituted 'x'/'y' into the
            # expression string, which corrupted names containing those letters
            # (e.g. "exp(x)" became "e0.5p(0.5)").  Binding the variables via
            # eval's locals dict evaluates every expression correctly.
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            value = float(eval(objective_func, safe_globals, local_vars))
            # Maximization is implemented by minimizing the negated objective.
            return value if minimize else -value

        def numerical_gradient(x):
            # Central-difference gradient of the (possibly negated) objective.
            x = np.asarray(x, dtype=float)
            step = 1e-6
            grad = np.zeros_like(x)
            for i in range(x.size):
                forward = x.copy()
                backward = x.copy()
                forward[i] += step
                backward[i] -= step
                grad[i] = (objective(forward) - objective(backward)) / (2 * step)
            return grad

        # BUGFIX: Newton-CG requires a `jac` callable — without it scipy raises
        # "Jacobian is required for Newton-CG method", so the original call
        # could never succeed.  Also use 'xtol', the option Newton-CG accepts.
        result = opt.minimize(objective, initial_point, method='Newton-CG',
                             jac=numerical_gradient,
                             options={'maxiter': max_iterations, 'xtol': tolerance})

        # Undo the negation applied for maximization runs.
        optimal_value = float(result.fun) if minimize else -float(result.fun)
        optimization_type = "最小值" if minimize else "最大值"

        return {
            "optimal_point": result.x.tolist(),
            "optimal_value": optimal_value,
            "success": bool(result.success),
            "iterations": int(result.nit),
            "message": result.message,
            "method": f"牛顿法（求{optimization_type}）",
            "optimization_type": optimization_type
        }

    except Exception as e:
        return f"错误：牛顿法优化失败 - {str(e)}"

@mcp.tool()
def bfgs_optimization(objective_func: str, initial_point: List[float],
                      tolerance: float = 1e-6, max_iterations: int = 100, minimize: bool = True) -> Union[Dict[str, Any], str]:
    """
    Quasi-Newton BFGS unconstrained optimization.

    Args:
        objective_func: objective expression in variables x and y
                        (supports sin, cos, exp, log, sqrt, abs, pi, e, pow)
        initial_point: starting point [x0] or [x0, y0]
        tolerance: gradient tolerance (gtol)
        max_iterations: iteration cap
        minimize: True to find a minimum, False to find a maximum

    Returns:
        dict with optimal point/value and solver status, or an error string.
    """
    try:
        # Evaluation environment: builtins stripped, small math whitelist only.
        safe_globals = {
            "__builtins__": {}, "sin": math.sin, "cos": math.cos,
            "exp": math.exp, "log": math.log, "sqrt": math.sqrt,
            "abs": abs, "pi": math.pi, "e": math.e, "pow": pow
        }

        def objective(x):
            x = np.asarray(x, dtype=float)
            # BUGFIX: bind x/y through eval's locals instead of textually
            # substituting them into the string; the old replace() corrupted
            # names containing 'x'/'y' (e.g. "exp(x)" -> "e0.5p(0.5)").
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            value = float(eval(objective_func, safe_globals, local_vars))
            # Maximization is implemented by minimizing the negated objective.
            return value if minimize else -value

        result = opt.minimize(objective, initial_point, method='BFGS',
                             options={'maxiter': max_iterations, 'gtol': tolerance})

        # Undo the negation applied for maximization runs.
        optimal_value = float(result.fun) if minimize else -float(result.fun)
        optimization_type = "最小值" if minimize else "最大值"

        return {
            "optimal_point": result.x.tolist(),
            "optimal_value": optimal_value,
            "success": bool(result.success),
            "iterations": int(result.nit),
            "message": result.message,
            "method": f"拟牛顿法 BFGS（求{optimization_type}）",
            "optimization_type": optimization_type
        }

    except Exception as e:
        return f"错误：BFGS优化失败 - {str(e)}"

# ==================== 非线性规划 - 有约束优化模块 ====================

@mcp.tool()
def sqp_optimization(objective_func: str, constraints: List[Dict], initial_point: List[float],
                     tolerance: float = 1e-6, max_iterations: int = 100, minimize: bool = True) -> Union[Dict[str, Any], str]:
    """
    Sequential quadratic programming (implemented via scipy's SLSQP).

    Args:
        objective_func: objective expression in variables x and y
        constraints: list of {"type": "eq"|"ineq", "fun": "<expression>"};
                     "ineq" means fun(x) >= 0 (scipy convention)
        initial_point: starting point [x0] or [x0, y0]
        tolerance: ftol convergence tolerance
        max_iterations: iteration cap
        minimize: True to find a minimum, False to find a maximum

    Returns:
        dict with optimal point/value and solver status, or an error string.
    """
    try:
        # Shared evaluation environment: builtins stripped, math whitelist only.
        safe_globals = {
            "__builtins__": {}, "sin": math.sin, "cos": math.cos,
            "exp": math.exp, "log": math.log, "sqrt": math.sqrt,
            "abs": abs, "pi": math.pi, "e": math.e, "pow": pow
        }

        def eval_expr(expr, x):
            # BUGFIX: bind x/y through eval's locals instead of textually
            # substituting them into the string; the old replace() corrupted
            # names containing 'x'/'y' (e.g. "exp(x)" -> "e0.5p(0.5)").
            x = np.asarray(x, dtype=float)
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            return float(eval(expr, safe_globals, local_vars))

        def objective(x):
            value = eval_expr(objective_func, x)
            # Maximization is implemented by minimizing the negated objective.
            return value if minimize else -value

        scipy_constraints = []
        for con in constraints:
            if con['type'] not in ('eq', 'ineq'):
                continue  # silently skip malformed constraint entries
            scipy_constraints.append({
                "type": con['type'],
                # expr=... binds now; plain closure would capture the last con
                "fun": lambda x, expr=con['fun']: eval_expr(expr, x)
            })

        result = opt.minimize(objective, initial_point, method='SLSQP',
                             constraints=scipy_constraints,
                             options={'maxiter': max_iterations, 'ftol': tolerance})

        # Undo the negation applied for maximization runs.
        optimal_value = float(result.fun) if minimize else -float(result.fun)
        optimization_type = "最小值" if minimize else "最大值"

        return {
            "optimal_point": result.x.tolist(),
            "optimal_value": optimal_value,
            "success": bool(result.success),
            "iterations": int(result.nit),
            "message": result.message,
            "method": f"顺序二次规划（SLSQP）（求{optimization_type}）",
            "optimization_type": optimization_type
        }

    except Exception as e:
        return f"错误：SQP优化失败 - {str(e)}"

# ==================== 全局优化模块（元启发式算法） ====================

@mcp.tool()
def differential_evolution_optimization(objective_func: str, bounds: List[Tuple[float, float]],
                                        max_iterations: int = 1000, pop_size: int = 15, minimize: bool = True) -> Union[Dict[str, Any], str]:
    """
    Global optimization via differential evolution.

    Args:
        objective_func: objective expression in variables x and y
        bounds: per-variable (lower, upper) bounds
        max_iterations: maximum number of generations
        pop_size: population size multiplier
        minimize: True to find a minimum, False to find a maximum

    Returns:
        dict with optimal point/value and solver status, or an error string.
    """
    try:
        # Evaluation environment: builtins stripped, small math whitelist only.
        safe_globals = {
            "__builtins__": {}, "sin": math.sin, "cos": math.cos,
            "exp": math.exp, "log": math.log, "sqrt": math.sqrt,
            "abs": abs, "pi": math.pi, "e": math.e, "pow": pow
        }

        def objective(x):
            x = np.asarray(x, dtype=float)
            # BUGFIX: bind x/y through eval's locals instead of textually
            # substituting them into the string; the old replace() corrupted
            # names containing 'x'/'y' (e.g. "exp(x)" -> "e0.5p(0.5)").
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            value = float(eval(objective_func, safe_globals, local_vars))
            # Maximization is implemented by minimizing the negated objective.
            return value if minimize else -value

        result = opt.differential_evolution(objective, bounds, strategy='best1bin',
                                            maxiter=max_iterations, popsize=pop_size)

        # Undo the negation applied for maximization runs.
        optimal_value = float(result.fun) if minimize else -float(result.fun)
        optimization_type = "最小值" if minimize else "最大值"

        return {
            "optimal_point": result.x.tolist(),
            "optimal_value": optimal_value,
            "success": bool(result.success),
            "iterations": int(result.nit),
            "message": result.message,
            "method": f"差分进化算法（求{optimization_type}）",
            "optimization_type": optimization_type
        }

    except Exception as e:
        return f"错误：差分进化优化失败 - {str(e)}"

# ==================== 线性规划 LP ====================

@mcp.tool()
def linear_programming(c: List[float], A_ub: Optional[List[List[float]]] = None,
                       b_ub: Optional[List[float]] = None,
                       A_eq: Optional[List[List[float]]] = None,
                       b_eq: Optional[List[float]] = None,
                       bounds: Optional[List[List[float]]] = None) -> Union[Dict[str, Any], str]:
    """
    线性规划求解器（调用 linprog）
    bounds参数：每个变量的边界，格式为[[下界, 上界], ...]，使用大数值表示无界
    例如：[[0, 1e10], [-1e10, 10], [0, 100]]
    """
    try:
        def _clamp(pair):
            # Map a raw bound pair onto finite sentinels so huge/None values
            # do not trip Pydantic validation; |v| >= 1e9 means "unbounded".
            if pair is None or len(pair) != 2:
                return [-1e20, 1e20]
            lo, hi = pair
            lo = lo if lo is not None and lo > -1e9 else -1e20
            hi = hi if hi is not None and hi < 1e9 else 1e20
            return [lo, hi]

        processed_bounds = [_clamp(pair) for pair in bounds] if bounds is not None else []

        result = opt.linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                             bounds=processed_bounds if bounds else None,
                             method='highs')

        solved = bool(result.success)
        return {
            "optimal_point": result.x.tolist() if result.success else None,
            "optimal_value": float(result.fun) if result.success else None,
            "success": solved,
            "message": result.message,
            "method": "线性规划（HiGHS）"
        }
    except Exception as e:
        return f"错误：线性规划求解失败 - {str(e)}"

# ==================== 二次规划 QP ====================

@mcp.tool()
def quadratic_programming(P: List[List[float]], q: List[float],
                          G: Optional[List[List[float]]] = None, h: Optional[List[float]] = None,
                          A: Optional[List[List[float]]] = None, b: Optional[List[float]] = None) -> Union[Dict[str, Any], str]:
    """
    Quadratic-programming solver backed by cvxopt.

    Minimizes (1/2)x'Px + q'x subject to Gx <= h and Ax = b.
    """
    try:
        from cvxopt import matrix, solvers
        import numpy as np
        solvers.options['show_progress'] = False

        def _to_matrix(data):
            # cvxopt wants its own matrix type built from a float array.
            return matrix(np.array(data, dtype=float))

        P_mat = _to_matrix(P)
        q_mat = _to_matrix(q)

        G_mat = h_mat = None
        if G is not None and h is not None:
            G_mat = _to_matrix(G)
            h_mat = _to_matrix(h)

        A_mat = b_mat = None
        if A is not None and b is not None:
            # The equality matrix must be handed over with an explicit 2-D shape.
            A_array = np.array(A, dtype=float)
            if A_array.ndim == 1:
                A_array = A_array.reshape(1, -1)
            A_mat = matrix(A_array.flatten().tolist(), A_array.shape)
            b_mat = _to_matrix(b)

        solution = solvers.qp(P_mat, q_mat, G_mat, h_mat, A_mat, b_mat)

        solved = solution['status'] == 'optimal'
        return {
            "optimal_point": [float(v) for v in solution['x']] if solved else None,
            "optimal_value": float(solution['primal objective']) if solved else None,
            "status": solution['status'],
            "success": solved,
            "method": "二次规划（cvxopt）"
        }
    except Exception as e:
        return f"错误：二次规划求解失败 - {str(e)}"

# ==================== 整数规划 MILP ====================

@mcp.tool()
def mixed_integer_linear_programming(c: List[float], A: List[List[float]], b: List[float],
                                     integer_indices: List[int],
                                     bounds: Optional[List[Tuple[float, float]]] = None) -> Union[Dict[str, Any], str]:
    """
    Mixed-integer linear programming via pulp.

    Minimizes c'x subject to Ax <= b; variables whose index appears in
    integer_indices are integer-valued, the rest continuous.
    """
    try:
        import pulp
        n_vars = len(c)
        model = pulp.LpProblem("MILP", pulp.LpMinimize)

        def _make_var(i):
            # Integer vs continuous category, with optional per-variable bounds.
            category = pulp.LpInteger if i in integer_indices else pulp.LpContinuous
            low = bounds[i][0] if bounds else None
            high = bounds[i][1] if bounds else None
            return pulp.LpVariable(f"x{i}", lowBound=low, upBound=high, cat=category)

        x_vars = [_make_var(i) for i in range(n_vars)]

        # Objective first, then one <= row-constraint per row of A.
        model += pulp.lpSum(c[i] * x_vars[i] for i in range(n_vars))
        for i in range(len(A)):
            model += pulp.lpSum(A[i][j] * x_vars[j] for j in range(n_vars)) <= b[i]

        model.solve()
        return {
            "optimal_point": [pulp.value(var) for var in x_vars],
            "optimal_value": pulp.value(model.objective),
            "status": pulp.LpStatus[model.status],
            "method": "混合整数线性规划（pulp）"
        }
    except Exception as e:
        return f"错误：MILP求解失败 - {str(e)}"


# ==================== 增广拉格朗日法 - 有约束非线性优化模块 ====================

@mcp.tool()
def augmented_lagrangian_optimization(objective_func: str, constraints: List[Dict], initial_point: List[float],
                                     tolerance: float = 1e-6, max_iterations: int = 100) -> Union[Dict[str, Any], str]:
    """
    Augmented Lagrangian optimization (scipy BFGS inner solver + penalty terms).
    Supports equality and inequality constraints.

    constraints example:
    [
        {"type": "eq", "fun": "x + y - 1"},
        {"type": "ineq", "fun": "x - 0.5"}   # interpreted as fun(x) >= 0
    ]
    """
    try:
        import numpy as np
        from scipy.optimize import minimize

        # Evaluation environment: builtins stripped, small math whitelist only.
        safe_globals = {
            "__builtins__": {},
            "sin": math.sin, "cos": math.cos, "exp": math.exp,
            "log": math.log, "sqrt": math.sqrt, "abs": abs,
            "pi": math.pi, "e": math.e, "pow": pow
        }

        def eval_expr(expr, x):
            # BUGFIX: bind x/y through eval's locals instead of textually
            # substituting them into the string; the old replace() corrupted
            # names containing 'x'/'y' (e.g. "exp(x)" -> "e0.5p(0.5)").
            x = np.asarray(x, dtype=float)
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            return float(eval(expr, safe_globals, local_vars))

        def f(x):
            return eval_expr(objective_func, x)

        mu = 10.0                          # penalty weight, grows each outer step
        lam = np.zeros(len(constraints))   # Lagrange multiplier estimates
        xk = np.array(initial_point, dtype=float)
        # Guard: at least one outer iteration, otherwise k/res below are unbound.
        max_iterations = max(1, int(max_iterations))

        for k in range(max_iterations):
            def aug_lag_obj(x):
                penalty = 0.0
                for i, con in enumerate(constraints):
                    c_val = eval_expr(con['fun'], x)
                    if con['type'] == 'eq':
                        penalty += lam[i] * c_val + 0.5 * mu * c_val ** 2
                    elif con['type'] == 'ineq':
                        # Only violated (negative) inequality values are penalized.
                        viol = min(0, c_val)
                        penalty += lam[i] * viol + 0.5 * mu * viol ** 2
                return f(x) + penalty

            res = minimize(aug_lag_obj, xk, method='BFGS', options={'gtol': tolerance})
            xk = res.x

            # First-order multiplier update.
            for i, con in enumerate(constraints):
                c_val = eval_expr(con['fun'], xk)
                if con['type'] == 'eq':
                    lam[i] = lam[i] + mu * c_val
                elif con['type'] == 'ineq':
                    lam[i] = lam[i] + mu * min(0, c_val)

            # Stop once the inner solve converged and all constraints are met.
            if res.success:
                cons_violations = [
                    abs(eval_expr(con['fun'], xk)) if con['type'] == 'eq'
                    else max(0, -eval_expr(con['fun'], xk))
                    for con in constraints
                ]
                # BUGFIX: with no constraints, max() on an empty list used to
                # raise and turn every unconstrained call into an error string.
                if not cons_violations or max(cons_violations) < tolerance:
                    break

            mu *= 10  # tighten the penalty for the next outer iteration

        return {
            "optimal_point": xk.tolist(),
            "optimal_value": f(xk),
            "iterations": k + 1,
            "message": res.message,
            "success": res.success,
            "method": "增广拉格朗日法"
        }
    except Exception as e:
        return f"错误：增广拉格朗日优化失败 - {str(e)}"


# ==================== 内点法（trust-constr） - 有约束非线性优化模块 ====================

@mcp.tool()
def interior_point_optimization(objective_func: str, constraints: List[Dict], initial_point: List[float],
                                tolerance: float = 1e-6, max_iterations: int = 100) -> Union[Dict[str, Any], str]:
    """
    Interior-point style optimization via scipy's trust-constr method.

    Supports equality ({"type": "eq", "fun": ...}, meaning fun(x) == 0) and
    inequality ({"type": "ineq", "fun": ...}, meaning fun(x) >= 0) constraints.
    """
    try:
        import numpy as np
        from scipy.optimize import minimize, NonlinearConstraint

        # Evaluation environment: builtins stripped, small math whitelist only.
        safe_globals = {
            "__builtins__": {},
            "sin": math.sin, "cos": math.cos,
            "exp": math.exp, "log": math.log,
            "sqrt": math.sqrt, "abs": abs,
            "pi": math.pi, "e": math.e, "pow": pow
        }

        def eval_expr(expr, x):
            # BUGFIX: bind x/y through eval's locals instead of textually
            # substituting them into the string; the old replace() corrupted
            # names containing 'x'/'y' (e.g. "exp(x)" -> "e0.5p(0.5)").
            x = np.asarray(x, dtype=float)
            local_vars = {
                "x": float(x[0]) if len(x) > 0 else 0.0,
                "y": float(x[1]) if len(x) > 1 else 0.0,
            }
            return float(eval(expr, safe_globals, local_vars))

        def objective(x):
            return eval_expr(objective_func, x)

        nl_constraints = []
        for con in constraints:
            # expr=... binds each expression now; a plain closure would capture
            # only the loop's final constraint.
            if con['type'] == 'eq':
                # equality: 0 <= fun(x) <= 0
                nl_constraints.append(NonlinearConstraint(lambda x, expr=con['fun']: eval_expr(expr, x), 0, 0))
            elif con['type'] == 'ineq':
                # scipy convention: lb <= fun(x) <= ub, so fun(x) >= 0
                nl_constraints.append(NonlinearConstraint(lambda x, expr=con['fun']: eval_expr(expr, x), 0, np.inf))

        result = minimize(objective, initial_point, method='trust-constr',
                          constraints=nl_constraints,
                          options={'gtol': tolerance, 'maxiter': max_iterations})

        return {
            "optimal_point": result.x.tolist(),
            "optimal_value": float(result.fun),
            "success": bool(result.success),
            "iterations": int(result.nit),
            "message": result.message,
            "method": "内点法 (trust-constr)"
        }

    except Exception as e:
        return f"错误：内点法优化失败 - {str(e)}"


# ==================== Zoutendijk可行方向法 - 有约束非线性优化模块 ====================

@mcp.tool()
def zoutendijk_feasible_direction_optimization(objective_func: str, constraints: List[Dict], initial_point: List[float],
                                              tolerance: float = 1e-6, max_iterations: int = 100) -> Union[Dict[str, Any], str]:
    """
    Zoutendijk feasible-direction optimization.

    Best suited to nonlinear objectives under (approximately) linear
    constraints.  Inequality constraints follow the convention fun(x) >= 0.

    Example constraints:
    [
        {"type": "eq", "fun": "x + y - 1"},
        {"type": "ineq", "fun": "x - 0.5"}
    ]

    Args:
        objective_func: expression in the variables ``x`` and ``y``.
        constraints: list of {"type": "eq"|"ineq", "fun": <expression>}.
        initial_point: starting point; must already be feasible.
        tolerance: stopping tolerance for gradient, direction and step norms.
        max_iterations: maximum number of outer iterations.

    Returns:
        dict with optimal_point / optimal_value / iterations / history on
        success, or an error string on failure.
    """
    try:
        import numpy as np
        import math

        # Restricted evaluation environment shared by all expressions.
        safe_env = {
            "__builtins__": {},
            "sin": math.sin, "cos": math.cos, "exp": math.exp,
            "log": math.log, "sqrt": math.sqrt, "abs": abs,
            "pi": math.pi, "e": math.e, "pow": pow
        }

        def eval_expr(expr, x):
            # Bind the variables through eval's locals instead of textual
            # substitution: str.replace('x', ...) would also rewrite the 'x'
            # inside function names (e.g. "exp(x)" -> "e1.0p(1.0)"), which
            # made any expression containing exp/max fail to parse.
            x = np.asarray(x, dtype=float)
            x_val = float(x[0]) if len(x) > 0 else 0.0
            y_val = float(x[1]) if len(x) > 1 else 0.0
            return float(eval(expr, safe_env, {"x": x_val, "y": y_val}))

        def gradient_numerical(func, x, h=1e-8):
            """Central-difference numerical gradient."""
            grad = np.zeros_like(x)
            for i in range(len(x)):
                x_plus = x.copy()
                x_minus = x.copy()
                x_plus[i] += h
                x_minus[i] -= h
                grad[i] = (func(x_plus) - func(x_minus)) / (2 * h)
            return grad

        def objective(x):
            return eval_expr(objective_func, x)

        def constraint_value(x, constraint):
            return eval_expr(constraint['fun'], x)

        def is_feasible(x, constraints, tol=1e-10):
            """Check feasibility of x against every constraint."""
            for con in constraints:
                val = constraint_value(x, con)
                if con['type'] == 'eq' and abs(val) > tol:
                    return False
                elif con['type'] == 'ineq' and val < -tol:
                    return False
            return True

        def find_feasible_direction(x, constraints, grad_f):
            """Find a feasible descent direction at x."""
            n = len(x)

            # Identify active constraints: all equalities, plus inequalities
            # that sit on their boundary.
            active_constraints = []
            for i, con in enumerate(constraints):
                val = constraint_value(x, con)
                if con['type'] == 'eq' or (con['type'] == 'ineq' and abs(val) < 1e-8):
                    active_constraints.append(i)

            if not active_constraints:
                # Locally unconstrained: normalized steepest descent.
                return -grad_f / np.linalg.norm(grad_f)

            # Gradient matrix of the active constraints.
            A = []
            for idx in active_constraints:
                con = constraints[idx]
                grad_con = gradient_numerical(lambda y: constraint_value(y, con), x)
                A.append(grad_con)

            A = np.array(A)

            # Project -grad_f onto the null space of A so that moving along
            # the direction keeps the active constraints satisfied (A d = 0).
            try:
                if A.shape[0] > 0:
                    Q, R = np.linalg.qr(A.T)
                    # Columns beyond rank(A) span the null space of A.
                    null_space = Q[:, A.shape[0]:]
                    if null_space.shape[1] > 0:
                        proj_grad = null_space @ (null_space.T @ (-grad_f))
                        if np.linalg.norm(proj_grad) > 1e-12:
                            return proj_grad / np.linalg.norm(proj_grad)

                # Fallback: least-squares multiplier estimate,
                # d = -grad_f + A^T * lambda.
                try:
                    lambda_opt = np.linalg.lstsq(A, grad_f, rcond=None)[0]
                    d = -grad_f + A.T @ lambda_opt
                    if np.linalg.norm(d) > 1e-12:
                        return d / np.linalg.norm(d)
                except np.linalg.LinAlgError:
                    pass

            except np.linalg.LinAlgError:
                pass

            # No usable direction found: treat as converged.
            return np.zeros(n)

        def line_search(x, direction, constraints, alpha_max=1.0):
            """Armijo backtracking line search restricted to the feasible set."""
            if np.linalg.norm(direction) < 1e-12:
                return 0.0

            # Largest step keeping the linearized inequality constraints
            # satisfied: need g(x + alpha*d) >= 0 for each g.
            alpha_feasible = alpha_max
            for con in constraints:
                if con['type'] == 'ineq':
                    grad_con = gradient_numerical(lambda y: constraint_value(y, con), x)
                    denom = np.dot(grad_con, direction)
                    if denom < -1e-12:  # moving along d decreases g
                        val = constraint_value(x, con)
                        alpha_bound = val / (-denom)
                        alpha_feasible = min(alpha_feasible, alpha_bound * 0.99)

            # Armijo sufficient-decrease backtracking.
            c1 = 1e-4
            alpha = min(alpha_feasible, 1.0)
            grad_f = gradient_numerical(objective, x)
            descent_condition = np.dot(grad_f, direction)

            if descent_condition >= 0:  # not a descent direction
                return 0.0

            f_x = objective(x)

            for _ in range(20):  # at most 20 halvings
                if alpha < 1e-12:
                    break
                x_new = x + alpha * direction
                if is_feasible(x_new, constraints):
                    f_new = objective(x_new)
                    if f_new <= f_x + c1 * alpha * descent_condition:
                        return alpha
                alpha *= 0.5

            return 0.0

        # ---- main iteration loop ----
        x = np.array(initial_point, dtype=float)
        history = [x.copy()]

        if not is_feasible(x, constraints):
            return {
                "optimal_point": None,
                "optimal_value": None,
                "success": False,
                "iterations": 0,
                "message": "初始点不可行",
                "method": "Zoutendijk可行方向法"
            }

        iteration = -1  # guards against max_iterations <= 0 leaving it unbound
        for iteration in range(max_iterations):
            # Objective gradient.
            grad_f = gradient_numerical(objective, x)

            # First-order optimality check.
            if np.linalg.norm(grad_f) < tolerance:
                break

            # Feasible descent direction.
            direction = find_feasible_direction(x, constraints, grad_f)

            if np.linalg.norm(direction) < tolerance:
                break

            # Step length.
            alpha = line_search(x, direction, constraints)

            if alpha < tolerance:
                break

            x_new = x + alpha * direction

            # Step-size convergence check.
            if np.linalg.norm(x_new - x) < tolerance:
                x = x_new
                break

            x = x_new
            history.append(x.copy())

        return {
            "optimal_point": x.tolist(),
            "optimal_value": float(objective(x)),
            "success": True,
            "iterations": iteration + 1,
            "message": "Zoutendijk可行方向法收敛",
            "method": "Zoutendijk可行方向法",
            "history": [point.tolist() for point in history]
        }

    except Exception as e:
        return f"错误：Zoutendijk可行方向法优化失败 - {str(e)}"


# ==================== 智能推荐器模块 ====================

@mcp.tool()
def recommend_optimizer(
    problem_type: str,
    has_constraints: bool,
    has_integer_vars: bool,
    is_blackbox: bool,
    dimension: int,
    is_convex: Optional[bool] = None
) -> str:
    """
    Recommend the best-suited optimization algorithm for the given
    problem features.

    Decision priority: integer variables > black-box > linear/quadratic
    structure > constraints > high dimension > smooth nonlinear > convexity.

    Args:
        problem_type: "linear", "quadratic" or "nonlinear".
        has_constraints: whether the problem has constraints.
        has_integer_vars: whether some variables are integer-valued.
        is_blackbox: whether gradients/structure are unavailable.
        dimension: number of decision variables.
        is_convex: optional convexity hint.

    Returns:
        A human-readable recommendation string.
    """
    # Integer variables rule out smooth local solvers entirely.
    if has_integer_vars:
        if problem_type == "linear":
            return "推荐算法：混合整数线性规划 MILP（pulp）"
        else:
            return "推荐算法：遗传算法 GA 或 模拟退火 SA（适用于整数/非线性问题）"

    # No gradients available: use population-based global methods.
    if is_blackbox:
        if dimension <= 100:
            return "推荐算法：差分进化 DE（适用于黑盒/非凸问题）"
        else:
            return "推荐算法：粒子群优化 PSO 或 遗传算法 GA（高维黑盒优化）"

    if problem_type == "linear":
        return "推荐算法：线性规划 linprog（HiGHS）"

    if problem_type == "quadratic":
        return "推荐算法：二次规划 QP（cvxopt）"

    if has_constraints:
        return "推荐算法：SQP 或 增广拉格朗日法 或 内点法 或 Zoutendijk可行方向法（支持等式/不等式约束）"

    if dimension > 100:
        return "推荐算法：L-BFGS 或 PSO（适用于高维优化）"

    # has_constraints is necessarily False past the return above, so the
    # original "not has_constraints and" guard here was redundant.
    if problem_type == "nonlinear":
        return "推荐算法：BFGS 或 牛顿法（快速局部收敛）"

    # Convexity hint, when provided.
    if is_convex is not None:
        if is_convex:
            return "推荐算法：信赖域方法（适用于凸优化问题）"
        else:
            return "推荐算法：梯度投影法 或 可行方向法（适用于非凸优化问题）"

    return "推荐算法：默认使用 BFGS 或 SQP；若不确定建议使用 DE 或 GA"



# ==================== 可视化模块 ====================

@mcp.tool()
def visualize_optimization_problem(
    objective_func: str,
    constraints: Optional[List[Dict[str, str]]] = None,
    optimal_point: Optional[List[float]] = None,
    x_range: Optional[List[float]] = None,
    y_range: Optional[List[float]] = None,
    save_path: Optional[str] = None
) -> Union[Dict[str, Any], str]:
    """
    Visualize a constrained optimization problem: objective contours,
    constraint curves/regions, and the optimal point.

    Args:
        objective_func: expression in the variables ``x`` and ``y``.
        constraints: list of {"type": "eq"|"ineq", "fun": <expr>,
            "display": <optional label override>}; inequalities use the
            convention fun(x, y) >= 0.
        optimal_point: [x, y] to mark on the plot, if known.
        x_range / y_range: plot limits; default [-5, 5] on each axis.
        save_path: optional path; the figure is saved there at 300 dpi.

    Returns:
        Status dict on success, or an error string on failure.
    """
    try:
        import matplotlib.pyplot as plt
        import numpy as np
        import math
        import re

        def python_to_latex(expr):
            """Best-effort conversion of a Python math expression to LaTeX."""
            latex_expr = expr
            # Relational operators.
            latex_expr = re.sub(r'<=', r'\\leq', latex_expr)
            latex_expr = re.sub(r'>=', r'\\geq', latex_expr)
            latex_expr = re.sub(r'!=', r'\\neq', latex_expr)
            # Powers.
            latex_expr = re.sub(r'\*\*([0-9]+)', r'^{\1}', latex_expr)
            latex_expr = re.sub(r'\*\*\(([^)]+)\)', r'^{\1}', latex_expr)
            # Multiplication sign.
            latex_expr = re.sub(r'\*', r'\\cdot ', latex_expr)
            # Parenthesized sub-expressions kept as-is.
            latex_expr = re.sub(r'\(([^)]+)\)', r'(\1)', latex_expr)
            return latex_expr

        # Font setup: prefer a CJK-capable font so Chinese labels render.
        import matplotlib.font_manager as fm
        try:
            chinese_fonts = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans', 'Arial Unicode MS']
            available_fonts = [f.name for f in fm.fontManager.ttflist]

            for font in chinese_fonts:
                if font in available_fonts:
                    plt.rcParams['font.sans-serif'] = [font]
                    break
            else:
                plt.rcParams['font.sans-serif'] = ['DejaVu Sans']

            plt.rcParams['axes.unicode_minus'] = False
            plt.rcParams['font.family'] = plt.rcParams['font.sans-serif']
            plt.rcParams['mathtext.fontset'] = 'stix'
            plt.rcParams['mathtext.rm'] = 'Times New Roman'
            plt.rcParams['mathtext.it'] = 'Times New Roman:italic'
            plt.rcParams['mathtext.bf'] = 'Times New Roman:bold'
            plt.rcParams['font.size'] = 12
        except Exception:
            plt.rcParams['font.family'] = 'DejaVu Sans'

        # Plot ranges.
        x_min, x_max = x_range if x_range else [-5, 5]
        y_min, y_max = y_range if y_range else [-5, 5]

        # Evaluation grid.
        x = np.linspace(x_min, x_max, 400)
        y = np.linspace(y_min, y_max, 400)
        X, Y = np.meshgrid(x, y)

        # Restricted environment for expression evaluation.
        safe_env = {
            "__builtins__": {},
            "sin": math.sin, "cos": math.cos, "exp": math.exp,
            "log": math.log, "sqrt": math.sqrt, "abs": abs,
            "pi": math.pi, "e": math.e, "pow": pow
        }

        def eval_expr(expr, x_val, y_val):
            # Bind x/y through eval's locals.  The previous textual
            # replace('x', ...) corrupted the 'x' inside names such as
            # "exp", turning every expression using exp/max into NaN.
            return eval(expr, safe_env, {"x": x_val, "y": y_val})

        def eval_on_grid(expr):
            """Evaluate an expression over the (X, Y) grid; NaN on failure."""
            Z = np.zeros_like(X)
            for i in range(X.shape[0]):
                for j in range(X.shape[1]):
                    try:
                        Z[i, j] = eval_expr(expr, X[i, j], Y[i, j])
                    except Exception:
                        Z[i, j] = np.nan
            return Z

        # Objective surface.
        Z = eval_on_grid(objective_func)

        fig, ax = plt.subplots(1, 1, figsize=(12, 10))

        # Objective contour lines.
        contour = ax.contour(X, Y, Z, levels=20, colors='lightblue', alpha=0.6)
        ax.clabel(contour, inline=True, fontsize=8)

        # Constraints.
        if constraints:
            colors = ['red', 'orange', 'green', 'purple', 'brown']
            for i, constraint in enumerate(constraints):
                color = colors[i % len(colors)]
                constraint_type = constraint.get('type', 'eq')
                constraint_func = constraint.get('fun', '')

                # Single grid evaluation shared by both constraint kinds
                # (the original duplicated an identical eval_constraint
                # closure in each branch).
                C = eval_on_grid(constraint_func)

                if constraint_type == 'eq':
                    # Equality constraint: draw the zero level set.
                    ax.contour(X, Y, C, levels=[0], colors=[color], linewidths=2)
                    display_text = constraint.get('display', constraint_func)
                    latex_constraint = python_to_latex(display_text)
                    ax.plot([], [], color=color, linewidth=2, label=f'等式约束 {i+1}: ${latex_constraint}$')

                elif constraint_type == 'ineq':
                    # Inequality constraint: dashed boundary + shaded
                    # feasible side (fun >= 0).
                    ax.contour(X, Y, C, levels=[0], colors=[color], linewidths=2, linestyles='--')
                    ax.contourf(X, Y, C, levels=[0, np.inf], colors=[color], alpha=0.2)
                    display_text = constraint.get('display', f'{constraint_func} \\geq 0')
                    latex_constraint = python_to_latex(display_text)
                    ax.plot([], [], color=color, linewidth=2, linestyle='--', label=f'不等式约束 {i+1}: ${latex_constraint}$')

        # Mark the optimal point and annotate its objective value.
        if optimal_point and len(optimal_point) >= 2:
            ax.plot(optimal_point[0], optimal_point[1], 'ro', markersize=12, label=f'最优解: $({optimal_point[0]:.3f}, {optimal_point[1]:.3f})$')

            try:
                optimal_value = eval_expr(objective_func, optimal_point[0], optimal_point[1])
                ax.text(optimal_point[0], optimal_point[1] + 0.2, f'最优值: ${optimal_value:.6f}$', 
                        ha='center', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))
            except Exception:
                pass

        # Axes, title, legend.
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_xlabel('$x$', fontsize=14)
        ax.set_ylabel('$y$', fontsize=14)
        latex_func = python_to_latex(objective_func)
        ax.set_title(f'约束优化问题可视化\n目标函数: ${latex_func}$', fontsize=16)
        ax.grid(True, alpha=0.3)
        ax.legend(fontsize=10)

        # Optional export.
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')

        plt.tight_layout()
        plt.show()
        plt.close(fig)  # release the figure so repeated calls don't leak memory

        return {
            "status": "success",
            "message": "可视化完成",
            "save_path": save_path if save_path else "未保存"
        }

    except Exception as e:
        return f"错误：可视化失败 - {str(e)}"

@mcp.tool()
def visualize_convergence_history(
    history: List[List[float]],
    objective_values: Optional[List[float]] = None,
    save_path: Optional[str] = None
) -> Union[Dict[str, Any], str]:
    """
    Visualize an optimization run: the iterate trajectory plus the
    objective-value curve.

    Args:
        history: list of iterates, one point per iteration.  For 2-D points
            a trajectory plot is drawn; otherwise only the objective curve.
        objective_values: optional objective value per iteration (plotted on
            a log scale).
        save_path: optional file path; the figure is also saved there.

    Returns:
        Dict containing a base64-encoded PNG (``image_base64``) on success,
        or an error string on failure.
    """
    try:
        import matplotlib.pyplot as plt
        import numpy as np
        import base64
        import io
        
        # Font setup: prefer a CJK-capable font so Chinese labels render.
        import matplotlib.font_manager as fm
        try:
            # Try known system fonts in priority order.
            chinese_fonts = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans', 'Arial Unicode MS']
            available_fonts = [f.name for f in fm.fontManager.ttflist]
            
            for font in chinese_fonts:
                if font in available_fonts:
                    plt.rcParams['font.sans-serif'] = [font]
                    break
            else:
                plt.rcParams['font.sans-serif'] = ['DejaVu Sans']
            
            plt.rcParams['axes.unicode_minus'] = False
            plt.rcParams['font.family'] = plt.rcParams['font.sans-serif']
            plt.rcParams['mathtext.fontset'] = 'stix'
            plt.rcParams['mathtext.rm'] = 'Times New Roman'
            plt.rcParams['mathtext.it'] = 'Times New Roman:italic'
            plt.rcParams['mathtext.bf'] = 'Times New Roman:bold'
            plt.rcParams['font.size'] = 12
        except Exception:
            plt.rcParams['font.family'] = 'DejaVu Sans'
        
        history = np.array(history)
        
        if history.shape[1] == 2:
            # 2-D problem: trajectory on the left, convergence on the right.
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
            
            # Left panel: iterate trajectory with start/end markers.
            ax1.plot(history[:, 0], history[:, 1], 'b-o', markersize=4, linewidth=1.5, label='优化轨迹')
            ax1.plot(history[0, 0], history[0, 1], 'go', markersize=8, label='起始点')
            ax1.plot(history[-1, 0], history[-1, 1], 'ro', markersize=8, label='最终点')
            ax1.set_xlabel('$x$', fontsize=14)
            ax1.set_ylabel('$y$', fontsize=14)
            ax1.set_title('优化轨迹', fontsize=16)
            ax1.grid(True, alpha=0.3)
            ax1.legend()
            
            # Right panel: objective value per iteration (log scale).
            if objective_values:
                ax2.plot(range(len(objective_values)), objective_values, 'r-o', markersize=4, linewidth=1.5)
                ax2.set_xlabel('迭代次数', fontsize=14)
                ax2.set_ylabel('目标函数值', fontsize=14)
                ax2.set_title('收敛过程', fontsize=16)
                ax2.grid(True, alpha=0.3)
                ax2.set_yscale('log')
            else:
                ax2.text(0.5, 0.5, '无目标函数数据', ha='center', va='center', transform=ax2.transAxes)
                ax2.set_title('目标函数值变化', fontsize=16)
        
        else:
            # Higher-dimensional problem: only the objective curve is shown.
            fig, ax = plt.subplots(1, 1, figsize=(10, 6))
            
            if objective_values:
                ax.plot(range(len(objective_values)), objective_values, 'r-o', markersize=4, linewidth=1.5)
                ax.set_xlabel('迭代次数', fontsize=14)
                ax.set_ylabel('目标函数值', fontsize=14)
                ax.set_title('高维优化收敛过程', fontsize=16)
                ax.grid(True, alpha=0.3)
                ax.set_yscale('log')
            else:
                ax.text(0.5, 0.5, '无目标函数数据', ha='center', va='center', transform=ax.transAxes)
                ax.set_title('收敛过程', fontsize=16)
        
        plt.tight_layout()
        
        # Render to an in-memory buffer and base64-encode the PNG.
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight')
        buffer.seek(0)
        image_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')
        buffer.close()
        
        # Optionally also write the figure to disk.
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        
        plt.close(fig)  # release the figure to free memory
        
        return {
            "status": "success",
            "message": "收敛过程可视化完成",
            "iterations": len(history),
            "image_base64": image_base64,
            "save_path": save_path if save_path else "未保存"
        }
        
    except Exception as e:
        return f"错误：收敛过程可视化失败 - {str(e)}"



@mcp.tool()
def solve_optimization_problem(
    objective_func: str,
    initial_point: Optional[List[float]] = None,
    constraints: Optional[List[Dict[str, str]]] = None,
    bounds: Optional[List[Tuple[float, float]]] = None,
    problem_type: str = "nonlinear",  # "linear", "quadratic", "nonlinear"
    has_integer_vars: bool = False,
    is_blackbox: bool = False
) -> Union[Dict[str, Any], str]:
    """
    Smart solver: recommend an algorithm from the problem's features and
    dispatch to the matching solver tool.

    For linear/MILP problems, ``objective_func`` is parsed as
    whitespace-separated coefficients (e.g. "1 2 3"); otherwise it is an
    expression in the variables ``x`` and ``y``.

    Returns:
        {"推荐算法": ..., "求解结果": ...} or an error string.
    """
    try:
        has_constraints = bool(constraints)
        dimension = len(initial_point) if initial_point else 2
        recommended = recommend_optimizer(
            problem_type=problem_type,
            has_constraints=has_constraints,
            has_integer_vars=has_integer_vars,
            is_blackbox=is_blackbox,
            dimension=dimension,
            is_convex=True  # optional hint
        )

        # Dispatch to the corresponding solver tool.
        if has_integer_vars and problem_type == "linear":
            # MILP: objective given as coefficients, e.g. objective_func="1 2 3".
            c = [float(term) for term in objective_func.strip().split()]
            A = [[1] * len(c)]  # dummy constraint row
            b = [1000]
            return {
                "推荐算法": recommended,
                "求解结果": mixed_integer_linear_programming(c=c, A=A, b=b,
                                                           integer_indices=list(range(len(c))),
                                                           bounds=bounds)
            }

        elif problem_type == "linear":
            # Linear programming with a dummy constraint row.
            c = [float(term) for term in objective_func.strip().split()]
            A_ub = [[1] * len(c)]
            b_ub = [1000]
            return {
                "推荐算法": recommended,
                "求解结果": linear_programming(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
            }

        elif has_constraints:
            if is_blackbox:
                return {
                    "推荐算法": recommended,
                    "求解结果": differential_evolution_optimization(objective_func=objective_func, bounds=bounds, minimize=True)
                }
            else:
                return {
                    "推荐算法": recommended,
                    "求解结果": sqp_optimization(objective_func=objective_func, constraints=constraints, initial_point=initial_point)
                }

        else:
            # Unconstrained smooth problem: BFGS.  (The original trailing
            # "默认算法：差分进化" branch was unreachable because
            # has_constraints / not has_constraints are exhaustive.)
            return {
                "推荐算法": recommended,
                "求解结果": bfgs_optimization(objective_func=objective_func, initial_point=initial_point)
            }

    except Exception as e:
        return f"错误：自动求解失败 - {str(e)}"

# ================ 模块3 插值和拟合模块 =============
@mcp.tool()
def polynomial_interpolation(x_points: List[float], y_points: List[float], method: str = "lagrange", derivatives: Optional[List[float]] = None) -> Union[Dict[str, Any], str]:
    """
    多项式插值算法
    
    Args:
        x_points: x坐标点
        y_points: y坐标点
        method: 插值方法 (lagrange, newton, cubic_spline, rbf, hermite)
        derivatives: 埃尔米特插值所需的导数值（仅在method="hermite"时使用）
    
    Returns:
        插值结果和系数
    """
    try:
        x_points = np.array(x_points)
        y_points = np.array(y_points)
        
        if len(x_points) != len(y_points):
            return "错误：x和y坐标点数量不匹配"
        
        if method == "lagrange":
            # 拉格朗日插值
            def lagrange_poly(x):
                result = 0
                n = len(x_points)
                for i in range(n):
                    term = y_points[i]
                    for j in range(n):
                        if i != j:
                            term *= (x - x_points[j]) / (x_points[i] - x_points[j])
                    result += term
                return result
            
            # 生成符号表达式和LaTeX公式
            try:
                x_sym = sp.Symbol('x')
                lagrange_expr = 0
                n = len(x_points)
                
                for i in range(n):
                    term = y_points[i]
                    for j in range(n):
                        if i != j:
                            term *= (x_sym - x_points[j]) / (x_points[i] - x_points[j])
                    lagrange_expr += term
                
                # 展开并简化多项式
                lagrange_expanded = sp.expand(lagrange_expr)
                latex_formula = sp.latex(lagrange_expanded)
            except Exception as latex_error:
                latex_formula = f"LaTeX生成错误: {str(latex_error)}"
                lagrange_expanded = "符号表达式生成失败"
            
            # 生成测试点
            x_test = np.linspace(min(x_points), max(x_points), 100)
            y_test = [lagrange_poly(x) for x in x_test]
            
            # 确保LaTeX公式是字符串类型
            latex_str = str(latex_formula) if latex_formula else "LaTeX公式生成失败"
            symbolic_str = str(lagrange_expanded) if lagrange_expanded else "符号表达式生成失败"
            
            # 生成友好的数学公式格式
            def format_display_formula(latex_str, symbolic_str):
                """生成适合直接显示的数学公式格式"""
                if "LaTeX公式生成失败" in latex_str or "错误" in latex_str:
                    # 如果LaTeX生成失败，使用符号表达式
                    if "符号表达式生成失败" not in symbolic_str:
                        return f"f(x) = {symbolic_str}"
                    else:
                        return "公式生成失败"
                
                # 返回LaTeX格式的公式，用于直接显示
                return f"$$f(x) = {latex_str}$$"
            
            display_formula = format_display_formula(latex_str, symbolic_str)
            
            # 生成友好格式的表达式
            def format_friendly_expression_local(symbolic_str):
                """生成友好格式的数学表达式"""
                try:
                    if "符号表达式生成失败" in str(symbolic_str) or not symbolic_str:
                        return "表达式生成失败"
                    
                    # 简化表达式显示
                    expr_str = str(symbolic_str)
                    
                    # 替换常见的数学符号为更友好的格式
                    expr_str = expr_str.replace('**', '^')
                    expr_str = expr_str.replace('*', '·')
                    
                    return f"f(x) = {expr_str}"
                except Exception as e:
                    return f"表达式格式化失败: {str(e)}"
            
            friendly_formula = format_friendly_expression_local(symbolic_str)
            
            result = {
                "method": "拉格朗日插值",
                "x_interpolated": x_test.tolist(),
                "y_interpolated": y_test,
                "original_points": {"x": x_points.tolist(), "y": y_points.tolist()},
                "latex_formula": latex_str,
                "symbolic_expression": symbolic_str,
                "display_formula": display_formula,  # 适合直接显示的数学公式
                "friendly_formula": friendly_formula,
                "formula_info": {
                    "latex_length": len(latex_str),
                    "has_latex": bool(latex_str and latex_str != "LaTeX公式生成失败")
                }
            }
            print(f"DEBUG: 拉格朗日插值 - LaTeX公式: {latex_str}")
            print(f"DEBUG: 拉格朗日插值 - 原始符号表达式: {symbolic_str}")
            print(f"DEBUG: 拉格朗日插值 - 显示公式: {display_formula}")
            print(f"DEBUG: 拉格朗日插值 - 友好格式变量: {friendly_formula}")
            print(f"DEBUG: 拉格朗日插值 - 结果字典键: {list(result.keys())}")
            print(f"DEBUG: 拉格朗日插值 - symbolic_expression字段值: {result['symbolic_expression']}")
            return result
        
        elif method == "newton":
            # 牛顿插值（差分法）
            def newton_divided_differences(x_vals, y_vals):
                n = len(x_vals)
                # 创建差分表
                diff_table = np.zeros((n, n))
                diff_table[:, 0] = y_vals
                
                for j in range(1, n):
                    for i in range(n - j):
                        diff_table[i, j] = (diff_table[i + 1, j - 1] - diff_table[i, j - 1]) / (x_vals[i + j] - x_vals[i])
                
                return diff_table[0, :]
            
            def newton_poly(x, x_vals, coeffs):
                result = coeffs[0]
                for i in range(1, len(coeffs)):
                    term = coeffs[i]
                    for j in range(i):
                        term *= (x - x_vals[j])
                    result += term
                return result
            
            coeffs = newton_divided_differences(x_points, y_points)
            
            # 生成符号表达式和LaTeX公式
            try:
                x_sym = sp.Symbol('x')
                newton_expr = coeffs[0]
                
                for i in range(1, len(coeffs)):
                    term = coeffs[i]
                    for j in range(i):
                        term *= (x_sym - x_points[j])
                    newton_expr += term
                
                # 展开并简化多项式
                newton_expanded = sp.expand(newton_expr)
                latex_formula = sp.latex(newton_expanded)
            except Exception as latex_error:
                latex_formula = f"LaTeX生成错误: {str(latex_error)}"
                newton_expanded = "符号表达式生成失败"
            
            x_test = np.linspace(min(x_points), max(x_points), 100)
            y_test = [newton_poly(x, x_points, coeffs) for x in x_test]
            
            # 确保LaTeX公式是字符串类型
            latex_str = str(latex_formula) if latex_formula else "LaTeX公式生成失败"
            symbolic_str = str(newton_expanded) if newton_expanded else "符号表达式生成失败"
            
            # 生成友好的数学公式格式
            def format_display_formula(latex_str, symbolic_str):
                """生成适合直接显示的数学公式格式"""
                if "LaTeX公式生成失败" in latex_str or "错误" in latex_str:
                    # 如果LaTeX生成失败，使用符号表达式
                    if "符号表达式生成失败" not in symbolic_str:
                        return f"f(x) = {symbolic_str}"
                    else:
                        return "公式生成失败"
                
                # 返回LaTeX格式的公式，用于直接显示
                return f"$$f(x) = {latex_str}$$"
            
            display_formula = format_display_formula(latex_str, symbolic_str)
            
            result = {
                "method": "牛顿插值",
                "coefficients": coeffs.tolist(),
                "x_interpolated": x_test.tolist(),
                "y_interpolated": y_test,
                "original_points": {"x": x_points.tolist(), "y": y_points.tolist()},
                "latex_formula": latex_str,
                "symbolic_expression": symbolic_str,
                "display_formula": display_formula,  # 适合直接显示的数学公式
                "formula_info": {
                    "latex_length": len(latex_str),
                    "has_latex": bool(latex_str and latex_str != "LaTeX公式生成失败")
                }
            }
            print(f"DEBUG: 牛顿插值 - LaTeX公式: {latex_str}")
            print(f"DEBUG: 牛顿插值 - 符号表达式: {symbolic_str}")
            print(f"DEBUG: 牛顿插值 - 结果字典键: {list(result.keys())}")
            return result
        
        elif method == "cubic_spline":
            try:
                # 转换为numpy数组并检查维度
                x_points = np.array(x_points, dtype=np.float64)
                y_points = np.array(y_points, dtype=np.float64)
                
                print(f"原始数据维度检查:")
                print(f"x_points长度: {len(x_points)}, 内容: {x_points}")
                print(f"y_points长度: {len(y_points)}, 内容: {y_points}")
                
                # 检查输入数据维度
                if len(x_points) != len(y_points):
                    raise ValueError(f"x和y坐标点数量不匹配: x有{len(x_points)}个点，y有{len(y_points)}个点")
                
                # 检查数据有效性
                if len(x_points) < 2:
                    raise ValueError("至少需要2个数据点进行三次样条插值")
                
                # 确保x坐标是严格递增的
                if not np.all(np.diff(x_points) >= 0):
                    # 如果x不是严格递增，进行排序
                    sorted_indices = np.argsort(x_points)
                    x_points = x_points[sorted_indices]
                    y_points = y_points[sorted_indices]
                    print(f"数据已按x坐标排序")
                
                # 创建三次样条插值对象
                cs = CubicSpline(x_points, y_points)
                
                # 生成插值点 - 确保数量一致且为整数
                num_points = 100  # 固定插值点数量
                x_test = np.linspace(x_points.min(), x_points.max(), num_points)
                
                # 计算插值结果
                y_test = cs(x_test)

                
                print(f"插值后数据维度检查:")
                print(f"x_test长度: {len(x_test)}, 类型: {type(x_test)}, dtype: {x_test.dtype}")
                print(f"y_test长度: {len(y_test)}, 类型: {type(y_test)}, dtype: {y_test.dtype}")

                
                # 验证最终维度
                if len(x_test) != len(y_test):
                    raise ValueError(f"维度修正失败: x_test长度={len(x_test)}, y_test长度={len(y_test)}")
                
                print(f"最终维度修正后: x_test长度={len(x_test)}, y_test长度={len(y_test)}")
                
                # 转换为列表，确保JSON序列化兼容性
                x_interpolated_list = [float(x) for x in x_test]
                y_interpolated_list = [float(y) for y in y_test]
                
                # 返回结果
                result = {
                    "method": "三次样条插值",
                    "x_interpolated": x_interpolated_list,
                    "y_interpolated": y_interpolated_list,
                    "original_points": {
                        "x": [float(x) for x in x_points],
                        "y": [float(y) for y in y_points]
                    },
                    "interpolation_info": {
                        "original_points_count": len(x_points),
                        "interpolated_points_count": len(x_interpolated_list),
                        "x_range": [float(x_points.min()), float(x_points.max())],
                        "y_range": [float(min(y_interpolated_list)), float(max(y_interpolated_list))],
                        "dimension_check": {
                            "x_length": len(x_interpolated_list),
                            "y_length": len(y_interpolated_list),
                            "is_consistent": len(x_interpolated_list) == len(y_interpolated_list)
                        }
                    }
                }
                
                print(f"最终结果维度验证:")
                print(f"x_interpolated长度: {len(result['x_interpolated'])}")
                print(f"y_interpolated长度: {len(result['y_interpolated'])}")
                print(f"维度一致性: {result['interpolation_info']['dimension_check']['is_consistent']}")
                
                return result
                
            except Exception as e:
                print(f"三次样条插值错误: {str(e)}")
                return {"error": f"三次样条插值失败: {str(e)}"}


        
        elif method == "rbf":
            # 径向基函数插值
            from scipy.interpolate import Rbf
            rbf = Rbf(x_points, y_points, function='multiquadric')
            
            x_test = np.linspace(min(x_points), max(x_points), 100)
            y_test = rbf(x_test)
            
            return {
                "method": "径向基函数插值",
                "x_interpolated": x_test.tolist(),
                "y_interpolated": y_test.tolist(),
                "original_points": {"x": x_points.tolist(), "y": y_points.tolist()}
            }
        
        elif method == "hermite":
            # 埃尔米特插值
            if derivatives is None:
                return "错误：埃尔米特插值需要提供导数值"
            
            if len(derivatives) != len(x_points):
                return "错误：导数值数量必须与数据点数量相同"
            
            # 使用scipy的CubicHermiteSpline进行埃尔米特插值
            from scipy.interpolate import CubicHermiteSpline
            derivatives = np.array(derivatives)
            hermite_interp = CubicHermiteSpline(x_points, y_points, derivatives)
            
            x_test = np.linspace(min(x_points), max(x_points), 100)
            y_test = hermite_interp(x_test)
            
            return {
                "method": "埃尔米特插值",
                "x_interpolated": x_test.tolist(),
                "y_interpolated": y_test.tolist(),
                "original_points": {"x": x_points.tolist(), "y": y_points.tolist()},
                "derivatives": derivatives.tolist()
            }
        
        return "错误：不支持的插值方法"
        
    except Exception as e:
        return f"错误：插值计算失败 - {str(e)}"



@mcp.tool()
def curve_fitting(x_data: List[float], y_data: List[float], degree: int = 2, method: str = "polynomial", alpha: float = 1.0) -> Union[Dict[str, Any], str]:
    """
    Curve fitting with several model families.

    Args:
        x_data: x data points.
        y_data: y data points.
        degree: Polynomial degree (used by polynomial, ridge and lasso).
        method: Fitting method (polynomial, exponential, logarithmic, ridge, lasso).
        alpha: Regularization strength (used by ridge and lasso).

    Returns:
        Dict with the fitted parameters, R²/RMSE goodness-of-fit metrics and a
        dense fitted curve for plotting, or an error string on failure.
    """
    try:
        x_data = np.array(x_data)
        y_data = np.array(y_data)

        if len(x_data) != len(y_data):
            return "错误：x和y数据点数量不匹配"

        def _fit_metrics(y_true, y_pred):
            # R² and RMSE shared by the polynomial / exponential / logarithmic branches.
            ss_res = np.sum((y_true - y_pred) ** 2)
            ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
            r2 = 1 - ss_res / ss_tot
            rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
            return float(r2), float(rmse)

        if method == "polynomial":
            # Least-squares polynomial fit.
            coeffs = np.polyfit(x_data, y_data, degree)
            poly = np.poly1d(coeffs)

            r_squared, rmse = _fit_metrics(y_data, poly(x_data))

            # Dense fitted curve for plotting.
            x_fit = np.linspace(min(x_data), max(x_data), 100)
            y_fit = poly(x_fit)

            return {
                "method": f"{degree}次多项式拟合",
                "coefficients": coeffs.tolist(),
                "r_squared": r_squared,
                "rmse": rmse,
                "x_fitted": x_fit.tolist(),
                "y_fitted": y_fit.tolist(),
                "original_data": {"x": x_data.tolist(), "y": y_data.tolist()}
            }

        elif method == "exponential":
            # Exponential fit: y = a * exp(b * x)
            from scipy.optimize import curve_fit

            def exp_func(x, a, b):
                return a * np.exp(b * x)

            # maxfev raised above the default: exponential fits frequently need
            # more function evaluations to converge.
            popt, pcov = curve_fit(exp_func, x_data, y_data, maxfev=10000)

            r_squared, rmse = _fit_metrics(y_data, exp_func(x_data, *popt))

            x_fit = np.linspace(min(x_data), max(x_data), 100)
            y_fit = exp_func(x_fit, *popt)

            return {
                "method": "指数拟合",
                "parameters": {"a": float(popt[0]), "b": float(popt[1])},
                "r_squared": r_squared,
                "rmse": rmse,
                "x_fitted": x_fit.tolist(),
                "y_fitted": y_fit.tolist(),
                "original_data": {"x": x_data.tolist(), "y": y_data.tolist()}
            }

        elif method == "logarithmic":
            # Logarithmic fit: y = a * ln(x) + b
            from scipy.optimize import curve_fit

            def log_func(x, a, b):
                return a * np.log(x) + b

            # ln(x) requires strictly positive x.
            if np.any(x_data <= 0):
                return "错误：对数拟合要求所有x值大于0"

            popt, pcov = curve_fit(log_func, x_data, y_data)

            r_squared, rmse = _fit_metrics(y_data, log_func(x_data, *popt))

            x_fit = np.linspace(min(x_data), max(x_data), 100)
            y_fit = log_func(x_fit, *popt)

            return {
                "method": "对数拟合",
                "parameters": {"a": float(popt[0]), "b": float(popt[1])},
                "r_squared": r_squared,
                "rmse": rmse,
                "x_fitted": x_fit.tolist(),
                "y_fitted": y_fit.tolist(),
                "original_data": {"x": x_data.tolist(), "y": y_data.tolist()}
            }

        elif method == "ridge":
            # Ridge regression on polynomial features.
            # FIX: PolynomialFeatures was referenced without ever being imported
            # (NameError at runtime); import it locally like the other branches
            # import their scipy helpers.
            from sklearn.preprocessing import PolynomialFeatures

            poly_features = PolynomialFeatures(degree=degree)
            X_poly = poly_features.fit_transform(x_data.reshape(-1, 1))

            ridge = Ridge(alpha=alpha)
            ridge.fit(X_poly, y_data)

            # Predictions and evaluation metrics.
            y_pred = ridge.predict(X_poly)
            r_squared = ridge.score(X_poly, y_data)
            rmse = np.sqrt(mean_squared_error(y_data, y_pred))
            mae = np.mean(np.abs(y_data - y_pred))  # mean absolute error

            x_fit = np.linspace(min(x_data), max(x_data), 100)
            X_fit_poly = poly_features.transform(x_fit.reshape(-1, 1))
            y_fit = ridge.predict(X_fit_poly)

            return {
                "method": f"Ridge回归（{degree}次多项式）",
                "coefficients": ridge.coef_.tolist(),
                "intercept": float(ridge.intercept_),
                "alpha": alpha,
                "r_squared": float(r_squared),
                "rmse": float(rmse),
                "mae": float(mae),
                "x_fitted": x_fit.tolist(),
                "y_fitted": y_fit.tolist(),
                "original_data": {"x": x_data.tolist(), "y": y_data.tolist()}
            }

        elif method == "lasso":
            # Lasso regression on polynomial features.
            # FIX: same missing PolynomialFeatures import as the ridge branch.
            from sklearn.preprocessing import PolynomialFeatures

            poly_features = PolynomialFeatures(degree=degree)
            X_poly = poly_features.fit_transform(x_data.reshape(-1, 1))

            lasso = Lasso(alpha=alpha, max_iter=2000)
            lasso.fit(X_poly, y_data)

            # Predictions and evaluation metrics.
            y_pred = lasso.predict(X_poly)
            r_squared = lasso.score(X_poly, y_data)
            rmse = np.sqrt(mean_squared_error(y_data, y_pred))
            mae = np.mean(np.abs(y_data - y_pred))

            # Count of non-zero coefficients (feature-selection effect of L1).
            non_zero_coefs = np.sum(np.abs(lasso.coef_) > 1e-10)

            x_fit = np.linspace(min(x_data), max(x_data), 100)
            X_fit_poly = poly_features.transform(x_fit.reshape(-1, 1))
            y_fit = lasso.predict(X_fit_poly)

            return {
                "method": f"Lasso回归（{degree}次多项式）",
                "coefficients": lasso.coef_.tolist(),
                "intercept": float(lasso.intercept_),
                "alpha": alpha,
                "r_squared": float(r_squared),
                "rmse": float(rmse),
                "mae": float(mae),
                "non_zero_features": int(non_zero_coefs),
                "x_fitted": x_fit.tolist(),
                "y_fitted": y_fit.tolist(),
                "original_data": {"x": x_data.tolist(), "y": y_data.tolist()}
            }

        return "错误：不支持的拟合方法"

    except Exception as e:
        return f"错误：曲线拟合失败 - {str(e)}"


# Duplicate function definitions have been removed; the versions defined above are used

@mcp.tool()
def generate_visualization(data: Dict[str, Any], chart_type: str = "matplotlib", title: str = "插值与拟合结果", save_path: Optional[str] = None) -> str:
    """
    Render interpolation / fitting results as a chart and save it as a PNG image.

    Args:
        data: Interpolation or fitting result dict (as produced by the tools above).
        chart_type: Chart backend ("matplotlib" or "plotly").
        title: Chart title.
        save_path: Optional output path; when omitted, a timestamped PNG in the
            current working directory is used.

    Returns:
        A message containing the saved image path, or an error string on failure.
    """
    import os
    import datetime
    import sympy as sp
    import matplotlib.pyplot as plt
    import matplotlib.font_manager as fm
    import numpy as np

    try:
        # Normalize the title encoding (handles CJK characters).
        title = ensure_utf8_encoding(title)

        if save_path is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            # FIX: default to the current working directory. The previous default
            # was a hard-coded, author-specific Windows path that does not exist
            # on other machines (and was doubly escaped on top of a raw string).
            save_path = os.path.join(os.getcwd(), f"interpolation_visualization_{timestamp}.png")

        # Sanitize the target path and make sure its directory exists.
        save_path = safe_file_path(save_path)
        save_dir = os.path.dirname(save_path) if os.path.dirname(save_path) else "."
        os.makedirs(save_dir, exist_ok=True)

        if chart_type == "matplotlib":
            # Configure math-text fonts so LaTeX-style labels render consistently.
            try:
                plt.rcParams['mathtext.fontset'] = 'stix'
                plt.rcParams['mathtext.rm'] = 'Times New Roman'
                plt.rcParams['mathtext.it'] = 'Times New Roman:italic'
                plt.rcParams['mathtext.bf'] = 'Times New Roman:bold'
            except Exception as e:
                print(f"数学字体设置失败: {e}")

            plt.figure(figsize=(10, 6), dpi=300)

            # Case 1: a symbolic expression is available — evaluate it densely
            # with sympy for a smooth curve.
            if "symbolic_expression" in data and "original_points" in data:
                x = sp.symbols('x')
                polynomial = sp.sympify(data["symbolic_expression"])
                polynomial_func = sp.lambdify(x, polynomial, "numpy")

                x_min = min(data["original_points"]["x"])
                x_max = max(data["original_points"]["x"])
                x_vals = np.linspace(x_min, x_max, 400)

                # FIX: a constant expression lambdifies to a scalar; broadcast it
                # so the length checks and plotting below still work.
                y_vals = np.asarray(polynomial_func(x_vals))
                if y_vals.ndim == 0:
                    y_vals = np.full_like(x_vals, float(y_vals))

                # Guard against mismatched lengths before plotting.
                if len(x_vals) != len(y_vals):
                    min_len = min(len(x_vals), len(y_vals))
                    x_vals = x_vals[:min_len]
                    y_vals = y_vals[:min_len]

                plt.plot(x_vals, y_vals, label=data.get("method", "插值曲线"), c='r', linewidth=2)
                plt.scatter(data["original_points"]["x"], data["original_points"]["y"],
                            c='r', s=60, label='原始数据点', zorder=5)

                if "specific_points" in data:
                    plt.scatter(data["specific_points"]["x"], data["specific_points"]["y"],
                                c='blue', s=80, label='插值计算点', marker='s', zorder=5)

            # Case 2: pre-computed interpolation points.
            elif "original_points" in data:
                plt.scatter(data["original_points"]["x"], data["original_points"]["y"],
                            color='red', s=80, label='原始数据点')

                if "x_interpolated" in data and "y_interpolated" in data:
                    x_interp = data["x_interpolated"]
                    y_interp = data["y_interpolated"]

                    # Trim to the common length if the arrays disagree.
                    if len(x_interp) != len(y_interp):
                        min_len = min(len(x_interp), len(y_interp))
                        x_interp = x_interp[:min_len]
                        y_interp = y_interp[:min_len]
                        print(f"DEBUG: 维度修正 - x长度: {len(data['x_interpolated'])}, y长度: {len(data['y_interpolated'])}, 修正后长度: {min_len}")

                    plt.plot(x_interp, y_interp,
                             'b-', linewidth=2.5, label=data.get("method", "插值曲线"))

                if "target_points" in data:
                    plt.scatter(data["target_points"]["x"], data["target_points"]["y"],
                                color='green', s=100, label='插值计算点', marker='s')

            # Case 3: fitting results (possibly several nested fits, e.g.
            # polynomial_data, exponential_data).
            elif "original_data" in data:
                plt.scatter(data["original_data"]["x"], data["original_data"]["y"],
                            color='red', s=80, label='原始数据点')

                for key, result in data.items():
                    if key in {"original_data", "comparison"}:
                        continue
                    if isinstance(result, dict) and "x_fitted" in result and "y_fitted" in result:
                        x_fit = result["x_fitted"]
                        y_fit = result["y_fitted"]
                        if len(x_fit) != len(y_fit):
                            min_len = min(len(x_fit), len(y_fit))
                            x_fit = x_fit[:min_len]
                            y_fit = y_fit[:min_len]
                            print(f"DEBUG: 拟合曲线维度修正 - {key} - 长度: {min_len}")
                        method_name = result.get("method", key)
                        r2 = result.get("r_squared")
                        rmse = result.get("rmse")
                        # Fold the quality metrics into the legend label.
                        label = method_name
                        if r2 is not None:
                            label += f" ($R^2$={r2:.4f})"
                        if rmse is not None:
                            label += f", RMSE={rmse:.4f}"
                        plt.plot(x_fit, y_fit, linewidth=2.0, label=label)

            # Optional info box with interpolation point counts.
            if "interpolation_info" in data:
                info = data["interpolation_info"]
                info_text = f"原始点数: {info.get('original_points_count', '?')}\n插值点数: {info.get('interpolated_points_count', '?')}"
                plt.text(0.02, 0.98, info_text, transform=plt.gca().transAxes,
                         verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

            # Auto-scale the axes with a 5% margin around all plotted data.
            all_x = []
            all_y = []
            if "original_points" in data:
                all_x.extend(data["original_points"]["x"])
                all_y.extend(data["original_points"]["y"])
            if "x_interpolated" in data and "y_interpolated" in data:
                all_x.extend(data["x_interpolated"])
                all_y.extend(data["y_interpolated"])
            if all_x and all_y:
                x_min, x_max = min(all_x), max(all_x)
                y_min, y_max = min(all_y), max(all_y)
                x_margin = (x_max - x_min) * 0.05 if x_max > x_min else 1
                y_margin = (y_max - y_min) * 0.05 if y_max > y_min else 1
                plt.xlim(x_min - x_margin, x_max + x_margin)
                plt.ylim(y_min - y_margin, y_max + y_margin)

            # Render R² in the title through mathtext.
            if 'R²' in title:
                title = title.replace('R²', r'$R^2$')
            elif 'R^2' in title:
                title = title.replace('R^2', r'$R^2$')

            plt.title(title, fontsize=16)
            plt.legend(fontsize=12)
            plt.grid(True, alpha=0.3)
            plt.tight_layout()

            # Save the figure; fall back to a local file on path problems.
            try:
                plt.savefig(save_path, format='png', dpi=300, bbox_inches='tight',
                           facecolor='white', edgecolor='none')
                print(f"图片已成功保存到: {save_path}")
            except Exception as save_error:
                print(f"保存图片时出错: {save_error}")
                backup_path = "backup_plot.png"
                plt.savefig(backup_path, format='png', dpi=300, bbox_inches='tight',
                           facecolor='white', edgecolor='none')
                save_path = backup_path
                print(f"已保存到备用路径: {backup_path}")
            finally:
                # Always release the figure to avoid leaking matplotlib state.
                plt.close()

        elif chart_type == "plotly":
            import plotly.graph_objects as go
            fig = go.Figure()

            if "original_points" in data:
                fig.add_trace(go.Scatter(
                    x=data["original_points"]["x"], y=data["original_points"]["y"],
                    mode='markers', name='原始数据点', marker=dict(size=8, color='red')))
                if "x_interpolated" in data and "y_interpolated" in data:
                    x_interp = data["x_interpolated"]
                    y_interp = data["y_interpolated"]

                    # Trim to the common length if the arrays disagree.
                    if len(x_interp) != len(y_interp):
                        min_len = min(len(x_interp), len(y_interp))
                        x_interp = x_interp[:min_len]
                        y_interp = y_interp[:min_len]
                        print(f"DEBUG: Plotly维度修正 - x长度: {len(data['x_interpolated'])}, y长度: {len(data['y_interpolated'])}, 修正后长度: {min_len}")

                    fig.add_trace(go.Scatter(
                        x=x_interp, y=y_interp,
                        mode='lines', name=data.get("method", "插值曲线"),
                        line=dict(color='blue', width=2)))

            elif "original_data" in data:
                fig.add_trace(go.Scatter(
                    x=data["original_data"]["x"], y=data["original_data"]["y"],
                    mode='markers', name='原始数据点', marker=dict(size=8, color='red')))
                if "x_fitted" in data and "y_fitted" in data:
                    fig.add_trace(go.Scatter(
                        x=data["x_fitted"], y=data["y_fitted"],
                        mode='lines', name=data.get("method", "拟合曲线"),
                        line=dict(color='green', width=2)))

            # Layout with fonts that can render CJK text.
            fig.update_layout(
                title=dict(text=title, font=dict(size=16)),
                xaxis_title='X',
                yaxis_title='Y',
                font=dict(family="Microsoft YaHei, SimHei, Arial", size=12)
            )

            # Export to PNG (requires kaleido); fall back to a local file.
            try:
                fig.write_image(save_path, format="png", width=1200, height=800, scale=2)
                print(f"Plotly图片已成功保存到: {save_path}")
            except Exception as e:
                print(f"Plotly图像保存失败：{e}")
                backup_path = "backup_plotly.png"
                try:
                    fig.write_image(backup_path, format="png", width=1200, height=800, scale=2)
                    save_path = backup_path
                    print(f"已保存到备用路径: {backup_path}")
                except Exception as backup_error:
                    raise RuntimeError(f"Plotly图像保存完全失败：{backup_error}，请确认已安装 kaleido（pip install kaleido）")

        else:
            raise ValueError("不支持的图表类型: 请选择 'matplotlib' 或 'plotly'")

        # Normalize the returned path encoding.
        safe_path = ensure_utf8_encoding(save_path)
        return f"图像已保存到: {safe_path}"

    except Exception as e:
        error_msg = ensure_utf8_encoding(str(e))
        return f"错误：可视化生成失败 - {error_msg}"



@mcp.tool()
def evaluate_polynomial(polynomial: str, x_value: List[float]) -> Union[Dict[str, Any], str]:
    """
    Evaluate a polynomial expression at each of the given x values.

    Args:
        polynomial: Polynomial expression string, e.g. "x**2 + 2*x + 1".
        x_value: List of x values at which to evaluate the expression.

    Returns:
        Dict with the inputs and the computed function values, or an
        error string if parsing/evaluation fails.
    """
    try:
        import sympy as sp

        print(f"DEBUG: evaluate_polynomial - 输入多项式: {polynomial}")
        print(f"DEBUG: evaluate_polynomial - 输入x值: {x_value}")

        # Parse the expression once; every point is substituted into it below.
        sym_x = sp.Symbol('x')
        poly_expr = sp.sympify(polynomial)
        print(f"DEBUG: evaluate_polynomial - 解析后的表达式: {poly_expr}")

        # Substitute each x value and force a numeric (float) result.
        computed = []
        for point in x_value:
            y_val = float(poly_expr.subs(sym_x, point).evalf())
            computed.append(y_val)
            print(f"DEBUG: evaluate_polynomial - x={point}, y={y_val}")

        result = {
            "polynomial": polynomial,
            "x_values": x_value,
            "results": computed,
            "method": "多项式计算"
        }
        print(f"DEBUG: evaluate_polynomial - 返回结果: {result}")
        return result

    except Exception as e:
        error_msg = f"错误：多项式计算失败 - {str(e)}"
        print(f"DEBUG: evaluate_polynomial - 错误: {error_msg}")
        return error_msg

@mcp.tool()
def evaluate_fitting_accuracy(x_data: List[float], y_data: List[float], y_fitted: List[float]) -> Union[Dict[str, Any], str]:
    """
    Evaluate fitting accuracy between observed and fitted values.

    Args:
        x_data: Original x data (kept for interface compatibility; only the
            y arrays enter the metrics).
        y_data: Observed y values.
        y_fitted: Fitted/predicted y values.

    Returns:
        Dict with R², adjusted R², RMSE, MAE, MAPE, correlation, a residual
        summary and a qualitative quality label, or an error string.
    """
    try:
        x_data = np.array(x_data)
        y_data = np.array(y_data)
        y_fitted = np.array(y_fitted)

        if len(y_data) != len(y_fitted):
            return "错误：原始数据和拟合数据长度不匹配"

        # Core goodness-of-fit metrics.
        r_squared = r2_score(y_data, y_fitted)
        rmse = np.sqrt(mean_squared_error(y_data, y_fitted))
        mae = np.mean(np.abs(y_data - y_fitted))

        # FIX: MAPE is undefined where y == 0; the previous code divided by
        # y_data directly and produced inf/NaN. Restrict to nonzero
        # observations, falling back to NaN when no observation is nonzero.
        nonzero = y_data != 0
        if np.any(nonzero):
            mape = np.mean(np.abs((y_data[nonzero] - y_fitted[nonzero]) / y_data[nonzero])) * 100
        else:
            mape = float('nan')

        # Residual analysis.
        residuals = y_data - y_fitted
        residual_std = np.std(residuals)
        residual_mean = np.mean(residuals)

        # Pearson correlation between observed and fitted values.
        correlation = np.corrcoef(y_data, y_fitted)[0, 1]

        # Adjusted R² assumes a 2-parameter model (p=2).
        # FIX: guard the denominator so tiny samples (n <= p + 1) return NaN
        # instead of raising ZeroDivisionError.
        n = len(y_data)
        p = 2  # assumed number of model parameters
        if n > p + 1:
            adjusted_r2 = 1 - (1 - r_squared) * (n - 1) / (n - p - 1)
        else:
            adjusted_r2 = float('nan')

        return {
            "r_squared": float(r_squared),
            "adjusted_r_squared": float(adjusted_r2),
            "rmse": float(rmse),
            "mae": float(mae),
            "mape": float(mape),
            "correlation": float(correlation),
            "residual_analysis": {
                "mean": float(residual_mean),
                "std": float(residual_std),
                "min": float(np.min(residuals)),
                "max": float(np.max(residuals))
            },
            "data_points": int(n),
            "fitting_quality": "优秀" if r_squared > 0.9 else "良好" if r_squared > 0.7 else "一般" if r_squared > 0.5 else "较差"
        }

    except Exception as e:
        return f"错误：拟合精度评估失败 - {str(e)}"


# ===================== Module 6: Time-series analysis ========================

# Assumes this runs inside the MCP tool environment
@mcp.tool()
def time_series_stationarity_test(data: List[float], test_type: str = 'adf') -> Dict[str, Any]:
    """
    Time-series stationarity test.

    Args:
        data: Time-series observations.
        test_type: Test type ('adf' - Augmented Dickey-Fuller).

    Returns:
        Dict with the test statistic, p-value, critical values, the
        stationarity verdict and a base64-encoded diagnostic figure,
        or a dict with an "error" key on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        # Coerce the input to a float numpy array.
        ts_data = np.array(data, dtype=float)
        
        # The ADF regression needs a minimum number of observations.
        if len(ts_data) < 10:
            return {"error": f"数据点不足({len(ts_data)})，至少需要10个点进行平稳性检验"}
        
        if test_type == 'adf':
            # Augmented Dickey-Fuller test with AIC-based lag selection.
            adf_result = adfuller(ts_data, autolag='AIC')
            statistic = float(adf_result[0])  # cast to plain Python float for JSON serialization
            p_value = float(adf_result[1])    # cast to plain Python float for JSON serialization
            critical_values = {
                '1%': float(adf_result[4]['1%']),
                '5%': float(adf_result[4]['5%']),
                '10%': float(adf_result[4]['10%'])
            }
            
            # Decide stationarity by comparing against the 5% critical value
            # (deliberate choice over the p-value; deemed more reliable here).
            is_stationary = bool(statistic < critical_values['5%'])  # plain Python bool for JSON serialization
            
            # Build a 2x2 diagnostic figure.
            fig, axes = plt.subplots(2, 2, figsize=(14, 10))
            plt.suptitle('时间序列平稳性分析', fontsize=16, fontweight='bold')
            
            # Panel (0,0): the raw series.
            axes[0, 0].plot(ts_data, linewidth=2, color='blue')
            axes[0, 0].set_title('原始时间序列')
            axes[0, 0].set_xlabel('时间点')
            axes[0, 0].set_ylabel('观测值')
            axes[0, 0].grid(True, linestyle='--', alpha=0.7)
            
            # Panel (0,1): rolling statistics; window sized between 3 and 10
            # and at most a third of the series so short inputs still work.
            window = max(3, min(10, len(ts_data) // 3))
            rolling_mean = pd.Series(ts_data).rolling(window=window).mean()
            rolling_std = pd.Series(ts_data).rolling(window=window).std()
            
            axes[0, 1].plot(ts_data, label='原始数据', alpha=0.7, color='blue')
            axes[0, 1].plot(rolling_mean, label=f'滚动均值(窗口={window})', color='red', linewidth=2)
            axes[0, 1].plot(rolling_std, label=f'滚动标准差(窗口={window})', color='green', linewidth=2)
            axes[0, 1].set_title('滚动统计')
            axes[0, 1].legend(loc='best')
            axes[0, 1].grid(True, linestyle='--', alpha=0.7)
            
            # Panel (1,0): first difference; degrades gracefully for very
            # short series that leave fewer than 2 differenced points.
            diff_data = np.diff(ts_data)
            if len(diff_data) > 1:
                axes[1, 0].plot(diff_data, color='purple', linewidth=2)
                axes[1, 0].set_title('一阶差分')
                axes[1, 0].set_xlabel('时间点')
                axes[1, 0].set_ylabel('差分值')
                axes[1, 0].grid(True, linestyle='--', alpha=0.7)
            else:
                axes[1, 0].axis('off')
                axes[1, 0].text(0.5, 0.5, '数据不足\n无法计算差分', 
                                ha='center', va='center', fontsize=12)
            
            # Panel (1,1): textual summary of the test result.
            axes[1, 1].axis('off')
            
            # Explanation of what the ADF statistic measures.
            stat_explanation = (
                "ADF统计量衡量序列与单位根过程的偏差。\n"
                "负值越大，序列越可能是平稳的。\n"
                f"当前值: {statistic:.4f}"
            )
            
            # Explanation of how the verdict was reached.
            conclusion_explanation = (
                f"序列{'平稳' if is_stationary else '非平稳'}\n\n"
                f"原因: ADF统计量({statistic:.4f}) "
                f"{'<' if is_stationary else '>'} "
                f"5%临界值({critical_values['5%']:.4f})"
            )
            
            result_text = f"""
ADF平稳性检验结果:

检验统计量: {statistic:.6f}
P值: {p_value:.6f}

临界值:
1%: {critical_values['1%']:.6f} (99%置信度平稳)
5%: {critical_values['5%']:.6f} (95%置信度平稳) → 决策边界
10%: {critical_values['10%']:.6f} (90%置信度平稳)

{conclusion_explanation}

统计量解释:
{stat_explanation}
            """
            
            # Background color reflects the verdict (green = stationary).
            bg_color = "lightgreen" if is_stationary else "lightcoral"
            axes[1, 1].text(0.1, 0.5, result_text, fontsize=11, 
                    verticalalignment='center', linespacing=1.5,
                    bbox=dict(boxstyle="round,pad=0.8", facecolor=bg_color, alpha=0.8))
            
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            
            # Encode the figure as base64 for transport in the result dict.
            plot_base64 = create_plot_base64(fig)
            
            return {
                'test_type': 'ADF',
                'statistic': statistic,
                'p_value': p_value,
                'critical_values': critical_values,
                'is_stationary': is_stationary,
                'conclusion': '平稳' if is_stationary else '非平稳',
                'decision_rule': f"ADF统计量 < {critical_values['5%']:.4f} (5%临界值)",
                'visualization': plot_base64
            }
        
        else:
            return {"error": f"不支持的检验类型: {test_type}"}
            
    except Exception as e:
        return {"error": f"平稳性检验错误: {str(e)}"}



@mcp.tool()
def time_series_decomposition(data: List[float], model: str = 'additive', period: int = 12) -> Dict[str, Any]:
    """
    Decompose a time series into trend, seasonal and residual components.

    Args:
        data: Time series observations.
        model: Decomposition model ('additive' or 'multiplicative').
        period: Seasonal period length; the series must contain at
            least two full periods.

    Returns:
        Dict with the component series, per-component summary statistics
        and a base64-encoded chart, or an {"error": ...} dict.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}

        if len(data) < 2 * period:
            return {"error": f"数据长度({len(data)})必须至少是周期({period})的2倍"}

        series = pd.Series(data)

        # Classical moving-average seasonal decomposition.
        result = seasonal_decompose(series, model=model, period=period)

        # Four stacked panels, one per component, drawn in a uniform loop.
        fig, axes = plt.subplots(4, 1, figsize=(12, 10))
        panels = [
            (result.observed, '原始时间序列', '观测值', 'blue'),
            (result.trend, '趋势成分', '趋势', 'red'),
            (result.seasonal, '季节性成分', '季节性', 'green'),
            (result.resid, '残差成分', '残差', 'orange'),
        ]
        for ax, (component, title, ylabel, color) in zip(axes, panels):
            ax.plot(component, linewidth=2, color=color)
            ax.set_title(title)
            ax.set_ylabel(ylabel)
            ax.grid(True, alpha=0.3)
        axes[3].set_xlabel('时间')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        # Trend and residual carry NaN at the series edges, so NaN-aware
        # reductions are required for their statistics.
        trend_stats = {
            'mean': float(np.nanmean(result.trend)),
            'std': float(np.nanstd(result.trend)),
            'min': float(np.nanmin(result.trend)),
            'max': float(np.nanmax(result.trend))
        }

        seasonal_stats = {
            'mean': float(np.mean(result.seasonal)),
            'std': float(np.std(result.seasonal)),
            'amplitude': float(np.max(result.seasonal) - np.min(result.seasonal))
        }

        resid_stats = {
            'mean': float(np.nanmean(result.resid)),
            'std': float(np.nanstd(result.resid)),
            'variance': float(np.nanvar(result.resid))
        }

        return {
            'model': model,
            'period': period,
            # Edge NaNs are dropped before serialization.
            'trend': result.trend[~np.isnan(result.trend)].tolist(),
            'seasonal': result.seasonal.tolist(),
            'residual': result.resid[~np.isnan(result.resid)].tolist(),
            'trend_stats': trend_stats,
            'seasonal_stats': seasonal_stats,
            'residual_stats': resid_stats,
            'visualization': plot_base64
        }

    except Exception as e:
        return {"error": f"时间序列分解错误: {str(e)}"}

@mcp.tool()
def ar_model_analysis(data: List[float], lags: int = 5, method: str = 'ols') -> Dict[str, Any]:
    """
    AR (autoregressive) model analysis.

    Args:
        data: Time series observations.
        lags: Autoregressive lag order.
        method: Estimation method ('ols', 'mle'). Accepted for backward
            compatibility but currently inert: AutoReg always estimates
            via conditional OLS.

    Returns:
        Dict with coefficients, fitted values, residuals, an
        out-of-sample forecast, fit statistics and a base64 chart,
        or an {"error": ...} dict.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        if len(data) <= lags + 1:
            return {"error": f"数据长度({len(data)})必须大于滞后阶数({lags}) + 1"}
        
        ts_data = np.array(data)
        
        # Fit AR(lags) with a constant term.
        ar_model = AutoReg(ts_data, lags=lags, trend='c')
        ar_fitted = ar_model.fit()
        
        # Forecast horizon: roughly a quarter of the sample, capped at 10.
        # max(1, ...) guards tiny samples where len(data) // 4 == 0, which
        # would otherwise request an invalid zero-step forecast.
        forecast_steps = max(1, min(10, len(data) // 4))
        forecast = ar_fitted.forecast(steps=forecast_steps)
        
        # In-sample fit and residuals (fitted series starts at index `lags`).
        fitted_values = ar_fitted.fittedvalues
        residuals = ar_fitted.resid
        
        # --- Visualization ---
        fig, axes = plt.subplots(2, 2, figsize=(12, 8))
        
        # Observed data vs. fitted values.
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8)
        axes[0, 0].plot(range(lags, len(ts_data)), fitted_values, 
                       label='AR拟合值', linewidth=2, alpha=0.8)
        
        # Forecast appended after the observed range.
        forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
        axes[0, 0].plot(forecast_index, forecast, 
                       label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--')
        
        axes[0, 0].set_title(f'AR({lags})模型拟合和预测')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Residual series.
        axes[0, 1].plot(residuals, linewidth=2, color='red')
        axes[0, 1].axhline(y=0, color='black', linestyle='--', alpha=0.5)
        axes[0, 1].set_title('残差序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('残差')
        axes[0, 1].grid(True, alpha=0.3)
        
        # Residual ACF/PACF with approximate 95% confidence bands
        # (±1.96/sqrt(n)); skipped for very short residual series.
        if len(residuals) > 10:
            acf_values = acf(residuals, nlags=min(20, len(residuals)//4), fft=False)
            pacf_values = pacf(residuals, nlags=min(20, len(residuals)//4))
            
            axes[1, 0].stem(range(len(acf_values)), acf_values, basefmt=" ")
            axes[1, 0].axhline(y=0, color='black', linestyle='-', alpha=0.5)
            axes[1, 0].axhline(y=1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 0].axhline(y=-1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 0].set_title('残差ACF')
            axes[1, 0].set_xlabel('滞后')
            axes[1, 0].set_ylabel('ACF')
            axes[1, 0].grid(True, alpha=0.3)
            
            axes[1, 1].stem(range(len(pacf_values)), pacf_values, basefmt=" ")
            axes[1, 1].axhline(y=0, color='black', linestyle='-', alpha=0.5)
            axes[1, 1].axhline(y=1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 1].axhline(y=-1.96/np.sqrt(len(residuals)), color='red', linestyle='--', alpha=0.5)
            axes[1, 1].set_title('残差PACF')
            axes[1, 1].set_xlabel('滞后')
            axes[1, 1].set_ylabel('PACF')
            axes[1, 1].grid(True, alpha=0.3)
        else:
            axes[1, 0].text(0.5, 0.5, '数据不足\n无法计算ACF', ha='center', va='center')
            axes[1, 1].text(0.5, 0.5, '数据不足\n无法计算PACF', ha='center', va='center')
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Fit statistics; `rsquared` is not exposed by all statsmodels
        # versions, hence the hasattr guard.
        model_stats = {
            'aic': float(ar_fitted.aic),
            'bic': float(ar_fitted.bic),
            'llf': float(ar_fitted.llf),
            'rsquared': float(ar_fitted.rsquared) if hasattr(ar_fitted, 'rsquared') else None,
            'mse': float(np.mean(residuals**2))
        }
        
        return {
            'model_type': f'AR({lags})',
            'lags': lags,
            'coefficients': ar_fitted.params.tolist(),
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast.tolist(),
            'model_stats': model_stats,
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"AR模型分析错误: {str(e)}"}

@mcp.tool()
def arima_model_analysis(data: List[float], order: Tuple[int, int, int] = (1, 1, 1), 
                        seasonal_order: Optional[Tuple[int, int, int, int]] = None) -> Dict[str, Any]:
    """
    ARIMA model analysis.

    Args:
        data: Time series observations.
        order: ARIMA order (p, d, q).
        seasonal_order: Optional seasonal order (P, D, Q, s).

    Returns:
        Dict with coefficients, fitted values, residuals, a forecast with
        95% confidence intervals, diagnostics and a base64 chart, or an
        {"error": ...} dict.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        p, d, q = order
        # Need at least one observation per parameter, and never fewer than 10.
        min_length = max(p + d + q + 1, 10)
        
        if len(data) < min_length:
            return {"error": f"数据长度({len(data)})不足，至少需要{min_length}个观测值"}
        
        ts_data = np.array(data)
        
        # Fit the (seasonal) ARIMA model.
        if seasonal_order:
            arima_model = ARIMA(ts_data, order=order, seasonal_order=seasonal_order)
        else:
            arima_model = ARIMA(ts_data, order=order)
        
        arima_fitted = arima_model.fit()
        
        # One get_forecast call yields both the point forecast and its
        # confidence interval (previously the forecast was computed twice).
        # max(1, ...) keeps the horizon valid for the shortest samples.
        forecast_steps = max(1, min(10, len(data) // 4))
        forecast_obj = arima_fitted.get_forecast(steps=forecast_steps)
        forecast_result = forecast_obj.predicted_mean
        forecast_ci = forecast_obj.conf_int()
        
        # In-sample fit and residuals.
        fitted_values = arima_fitted.fittedvalues
        residuals = arima_fitted.resid
        
        # --- Visualization ---
        fig, axes = plt.subplots(2, 2, figsize=(12, 8))
        
        # Observed data, fitted values and forecast.
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8)
        axes[0, 0].plot(fitted_values, label='ARIMA拟合值', linewidth=2, alpha=0.8)
        
        forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
        axes[0, 0].plot(forecast_index, forecast_result, 
                       label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--')
        
        if len(forecast_ci) > 0:
            axes[0, 0].fill_between(forecast_index, 
                                   forecast_ci[:, 0], forecast_ci[:, 1],
                                   alpha=0.3, label='95%置信区间')
        
        model_name = f'ARIMA{order}'
        if seasonal_order:
            model_name += f'×{seasonal_order}'
        
        axes[0, 0].set_title(f'{model_name}模型拟合和预测')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Residual series.
        axes[0, 1].plot(residuals, linewidth=2, color='red')
        axes[0, 1].axhline(y=0, color='black', linestyle='--', alpha=0.5)
        axes[0, 1].set_title('残差序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('残差')
        axes[0, 1].grid(True, alpha=0.3)
        
        # Residual normality check via Q-Q plot.
        from scipy import stats
        stats.probplot(residuals, dist="norm", plot=axes[1, 0])
        axes[1, 0].set_title('残差Q-Q图')
        axes[1, 0].grid(True, alpha=0.3)
        
        # Text panel for the diagnostic summary.
        axes[1, 1].axis('off')
        
        # Ljung-Box test for residual autocorrelation; best-effort only.
        try:
            from statsmodels.stats.diagnostic import acorr_ljungbox
            lb_test = acorr_ljungbox(residuals, lags=min(10, len(residuals)//4), return_df=True)
            lb_pvalue = lb_test['lb_pvalue'].iloc[-1]
        except Exception:  # narrowed from a bare except; keep best-effort semantics
            lb_pvalue = None
        
        result_text = f"""
{model_name}模型诊断:

模型信息:
AIC: {arima_fitted.aic:.4f}
BIC: {arima_fitted.bic:.4f}
对数似然: {arima_fitted.llf:.4f}

残差统计:
均值: {np.mean(residuals):.6f}
标准差: {np.std(residuals):.6f}
偏度: {stats.skew(residuals):.4f}
峰度: {stats.kurtosis(residuals):.4f}

{f'Ljung-Box检验 p值: {lb_pvalue:.4f}' if lb_pvalue is not None else ''}
        """
        
        axes[1, 1].text(0.1, 0.5, result_text, fontsize=10, verticalalignment='center',
                bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Fit statistics.
        model_stats = {
            'aic': float(arima_fitted.aic),
            'bic': float(arima_fitted.bic),
            'llf': float(arima_fitted.llf),
            'mse': float(np.mean(residuals**2)),
            'mae': float(np.mean(np.abs(residuals))),
            'ljung_box_pvalue': float(lb_pvalue) if lb_pvalue is not None else None
        }
        
        return {
            'model_type': model_name,
            'order': order,
            'seasonal_order': seasonal_order,
            'coefficients': arima_fitted.params.tolist(),
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast_result.tolist(),
            'forecast_ci_lower': forecast_ci[:, 0].tolist() if len(forecast_ci) > 0 else [],
            'forecast_ci_upper': forecast_ci[:, 1].tolist() if len(forecast_ci) > 0 else [],
            'model_stats': model_stats,
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"ARIMA模型分析错误: {str(e)}"}

@mcp.tool()
def time_series_auto_arima(data: List[float], max_p: int = 3, max_d: int = 2, max_q: int = 3, 
                           seasonal: bool = False, m: int = 12, timeout: int = 60) -> Dict[str, Any]:
    """
    Intelligent automatic ARIMA model selection (enhanced).

    Args:
        data: Time series observations (at least 15 points).
        max_p: Maximum AR order (default 3; balances speed vs. accuracy).
        max_d: Maximum differencing order (default 2; usually enough).
        max_q: Maximum MA order (default 3; balances speed vs. accuracy).
        seasonal: Whether to consider seasonal models.
        m: Seasonal period; may be overridden by automatic detection.
        timeout: Time budget in seconds for the model search.

    Returns:
        Dict describing the best model (by AIC), search statistics,
        accuracy metrics, residual diagnostics, a forecast and a base64
        visualization, or an {"error": ...} dict.
    """
    import time
    import warnings
    
    def detect_periodicity(ts_data):
        """Detect a dominant period via FFT, falling back to autocorrelation."""
        try:
            from scipy.fft import fft
            from scipy.signal import find_peaks
            
            # Frequency-domain analysis of the demeaned series.
            fft_vals = np.abs(fft(ts_data - np.mean(ts_data)))
            freqs = np.fft.fftfreq(len(ts_data))
            
            # Spectral peaks above 10% of the maximum are period candidates.
            peaks, _ = find_peaks(fft_vals[:len(fft_vals)//2], height=np.max(fft_vals)*0.1)
            
            if len(peaks) > 0:
                dominant_freq = freqs[peaks[np.argmax(fft_vals[peaks])]]
                if dominant_freq > 0:
                    period = int(1 / dominant_freq)
                    if 2 <= period <= len(ts_data) // 3:
                        return period
            
            # Fallback: first clear positive peak of the autocorrelation.
            autocorr = np.correlate(ts_data, ts_data, mode='full')
            autocorr = autocorr[len(autocorr)//2:]
            autocorr = autocorr / autocorr[0]
            
            for i in range(2, min(len(autocorr), len(ts_data)//3)):
                if autocorr[i] > 0.3 and autocorr[i] > autocorr[i-1] and autocorr[i] > autocorr[i+1]:
                    return i
                    
            return m  # default period
        except Exception:
            return m
    
    def check_stationarity(ts_data):
        """ADF-test the series (and its differences) and suggest a d order."""
        try:
            adf_result = adfuller(ts_data, autolag='AIC')
            is_stationary = adf_result[1] < 0.05
            
            if is_stationary:
                return 0, "序列已平稳"
            
            # First difference.
            diff1 = np.diff(ts_data)
            if len(diff1) > 10:
                adf_result1 = adfuller(diff1, autolag='AIC')
                if adf_result1[1] < 0.05:
                    return 1, "一阶差分后平稳"
            
            # Second difference.
            if len(diff1) > 10:
                diff2 = np.diff(diff1)
                if len(diff2) > 10:
                    adf_result2 = adfuller(diff2, autolag='AIC')
                    if adf_result2[1] < 0.05:
                        return 2, "二阶差分后平稳"
            
            return 1, "建议一阶差分"
        except Exception:
            return 1, "默认一阶差分"
    
    def fit_arima_model(order, seasonal_order, ts_data):
        """Fit one ARIMA candidate defensively; return a summary dict or None."""
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                
                # Keep the total parameter count reasonable.
                p, d, q = order
                if p + q > 8:
                    return None
                    
                if seasonal_order:
                    P, D, Q, s = seasonal_order
                    if P + Q > 6 or s > len(ts_data) // 3:
                        return None
                
                # Try several optimizers in turn.
                methods = ['lbfgs', 'bfgs', 'nm']
                
                for method in methods:
                    try:
                        if seasonal_order:
                            model = ARIMA(ts_data, order=order, seasonal_order=seasonal_order,
                                        enforce_stationarity=False, enforce_invertibility=False)
                        else:
                            model = ARIMA(ts_data, order=order,
                                        enforce_stationarity=False, enforce_invertibility=False)
                        
                        # statsmodels' ARIMA routes optimizer options through
                        # method_kwargs (fit(method=...) selects the estimation
                        # method, not the optimizer). Fall back to the default
                        # fit when the installed version rejects these kwargs.
                        try:
                            fitted = model.fit(
                                method_kwargs={'method': method, 'maxiter': 100, 'disp': 0})
                        except (TypeError, ValueError):
                            fitted = model.fit()
                        
                        # Convergence check.
                        if hasattr(fitted, 'mle_retvals') and not fitted.mle_retvals.get('converged', True):
                            continue
                            
                        # Information criteria must be finite.
                        if not (np.isfinite(fitted.aic) and np.isfinite(fitted.bic)):
                            continue
                            
                        # Residuals must be usable.
                        residuals = fitted.resid
                        if np.any(np.isnan(residuals)) or np.std(residuals) == 0:
                            continue
                            
                        return {
                            'order': order,
                            'seasonal_order': seasonal_order,
                            'aic': float(fitted.aic),
                            'bic': float(fitted.bic),
                            'llf': float(fitted.llf),
                            'model': fitted,
                            'method': method,
                            'residual_std': float(np.std(residuals))
                        }
                        
                    except Exception:
                        continue
                        
                return None
                
        except Exception:
            return None
    
    def compute_accuracy(ts_data, fitted_values):
        """RMSE / MAE / MAPE of the in-sample fit. MAPE skips zero
        observations to avoid producing inf; returns 0.0 when undefined."""
        rmse = float(np.sqrt(np.mean((ts_data - fitted_values) ** 2)))
        mae = float(np.mean(np.abs(ts_data - fitted_values)))
        mape = 0.0
        if len(fitted_values) > 1:
            actual = ts_data[1:]
            predicted = fitted_values[1:]
            nonzero = actual != 0
            if np.any(nonzero):
                mape = float(np.mean(np.abs((actual[nonzero] - predicted[nonzero]) / actual[nonzero])) * 100)
        return rmse, mae, mape
    
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行时间序列分析"}
        
        # Data validation (15 points is a pragmatic lower bound).
        if len(data) < 15:
            return {"error": "数据长度不足，至少需要15个观测值进行自动模型选择"}
            
        if seasonal and len(data) < 2 * m:
            return {"error": f"季节性分析需要至少{2*m}个观测值"}
        
        ts_data = np.array(data, dtype=float)
        
        # Reject NaN / infinite observations up front.
        if np.any(np.isnan(ts_data)) or np.any(np.isinf(ts_data)):
            return {"error": "数据包含NaN或无穷大值，请先清理数据"}
        
        # Stationarity analysis suggests a differencing order d.
        suggested_d, stationarity_msg = check_stationarity(ts_data)
        
        # Optionally replace m with the detected dominant period.
        if seasonal:
            detected_period = detect_periodicity(ts_data)
            if detected_period != m and 2 <= detected_period <= len(ts_data) // 3:
                m = detected_period
        
        # Shrink the search space for short series.
        data_len = len(data)
        if data_len < 50:
            max_p = min(max_p, 2)
            max_q = min(max_q, 2)
            max_d = min(max_d, 1)
        elif data_len < 100:
            max_p = min(max_p, 3)
            max_q = min(max_q, 3)
        
        # Respect the suggested differencing order (allow d+1 at most).
        if suggested_d <= max_d:
            max_d = min(max_d, suggested_d + 1)
            
        start_time = time.time()
        best_aic = np.inf
        best_model_info = None
        results = []
        
        # Priority configurations seeded by the stationarity analysis.
        priority_configs = [
            ((1, suggested_d, 1), None),
            ((0, suggested_d, 1), None),
            ((1, suggested_d, 0), None),
            ((2, suggested_d, 1), None),
            # Common classic configurations.
            ((1, 1, 1), None),
            ((0, 1, 1), None),
            ((2, 1, 2), None),
        ]
        
        # Seasonal candidates.
        if seasonal and len(data) > 2 * m:
            seasonal_configs = [
                ((1, suggested_d, 1), (1, 1, 1, m)),
                ((0, suggested_d, 1), (0, 1, 1, m)),
                ((1, suggested_d, 0), (1, 1, 0, m)),
                ((1, 1, 1), (1, 1, 1, m)),
                ((0, 1, 1), (0, 1, 1, m)),
            ]
            priority_configs.extend(seasonal_configs)
        
        # De-duplicate and drop configurations outside the search bounds.
        valid_priority_configs = []
        seen_configs = set()
        for order, seasonal_order in priority_configs:
            p, d, q = order
            if (p <= max_p and d <= max_d and q <= max_q and 
                (order, seasonal_order) not in seen_configs):
                valid_priority_configs.append((order, seasonal_order))
                seen_configs.add((order, seasonal_order))
        
        # Phase 1: priority configurations (40% of the time budget).
        for order, seasonal_order in valid_priority_configs:
            if time.time() - start_time > timeout * 0.4:
                break
                
            result = fit_arima_model(order, seasonal_order, ts_data)
            if result:
                results.append(result)
                if result['aic'] < best_aic:
                    best_aic = result['aic']
                    best_model_info = result
        
        # Phase 2: broader search while time remains.
        if time.time() - start_time < timeout * 0.7:
            search_configs = []
            searched_configs = set(valid_priority_configs)
            
            # 1. Configurations around the suggested differencing order.
            for p in range(max_p + 1):
                for q in range(max_q + 1):
                    for d_offset in [-1, 0, 1]:
                        d = max(0, min(max_d, suggested_d + d_offset))
                        config = ((p, d, q), None)
                        if config not in searched_configs:
                            search_configs.append(config)
                            searched_configs.add(config)
            
            # 2. Seasonal grid, bounded to keep runtime manageable.
            if seasonal and len(data) > 2 * m:
                for p in range(min(3, max_p + 1)):
                    for d in range(min(2, max_d + 1)):
                        for q in range(min(3, max_q + 1)):
                            for P in range(min(2, max_p + 1)):
                                for D in range(min(2, max_d + 1)):
                                    for Q in range(min(2, max_q + 1)):
                                        config = ((p, d, q), (P, D, Q, m))
                                        if config not in searched_configs:
                                            search_configs.append(config)
                                            searched_configs.add(config)
            
            # 3. Neighborhood of the current best model.
            if best_model_info:
                best_p, best_d, best_q = best_model_info['order']
                for p_offset in [-1, 0, 1]:
                    for q_offset in [-1, 0, 1]:
                        p = max(0, min(max_p, best_p + p_offset))
                        q = max(0, min(max_q, best_q + q_offset))
                        config = ((p, best_d, q), None)
                        if config not in searched_configs:
                            search_configs.append(config)
                            searched_configs.add(config)
            
            # Try models with fewer parameters first.
            search_configs.sort(key=lambda x: sum(x[0]) + (sum(x[1][:3]) if x[1] else 0))
            
            # Cap the number of candidates to control runtime.
            max_search = min(len(search_configs), 60)
            
            for order, seasonal_order in search_configs[:max_search]:
                if time.time() - start_time > timeout * 0.85:
                    break
                    
                result = fit_arima_model(order, seasonal_order, ts_data)
                if result:
                    results.append(result)
                    if result['aic'] < best_aic:
                        best_aic = result['aic']
                        best_model_info = result
        
        if best_model_info is None or len(results) == 0:
            # Last resort: the simplest sensible configuration.
            simple_result = fit_arima_model((1, 1, 1), None, ts_data)
            if simple_result:
                best_model_info = simple_result
                results = [simple_result]
                # Fix: previously best_aic stayed at inf in this branch.
                best_aic = simple_result['aic']
            else:
                return {"error": "无法找到收敛的ARIMA模型，请检查数据质量"}
        
        best_model = best_model_info['model']
        best_order = best_model_info['order']
        best_seasonal_order = best_model_info['seasonal_order']
        
        # model_name is used by both the plot and the returned payload, so it
        # must be built BEFORE the plotting try-block (previously a plotting
        # failure left it undefined and the final return raised NameError).
        model_name = f'ARIMA{best_order}'
        if best_seasonal_order:
            model_name += f'×{best_seasonal_order}'
        
        # Forecast defensively; conservative horizon.
        try:
            forecast_steps = min(12, len(data) // 6)
            forecast_result = best_model.forecast(steps=forecast_steps)
            
            # Confidence interval is optional; skip it on failure.
            try:
                forecast_ci = best_model.get_forecast(steps=forecast_steps).conf_int()
            except Exception:
                forecast_ci = np.array([])
                
        except Exception:
            forecast_steps = 1
            forecast_result = best_model.forecast(steps=1)
            forecast_ci = np.array([])
        
        fitted_values = best_model.fittedvalues
        residuals = best_model.resid
        
        # Residual diagnostics are computed BEFORE the visualization so the
        # returned payload never references undefined names when plotting
        # fails (previously these lived inside the plotting try-block).
        ljung_box_p = None
        jarque_bera_p = None
        durbin_watson = None
        
        try:
            from statsmodels.stats.diagnostic import acorr_ljungbox
            lb_result = acorr_ljungbox(residuals, lags=min(10, len(residuals)//4), return_df=True)
            ljung_box_p = lb_result['lb_pvalue'].iloc[-1]
        except Exception:
            pass
        
        try:
            from statsmodels.stats.stattools import jarque_bera
            jb_stat, jb_p, _, _ = jarque_bera(residuals)
            jarque_bera_p = jb_p
        except Exception:
            pass
        
        try:
            from statsmodels.stats.stattools import durbin_watson as dw
            durbin_watson = dw(residuals)
        except Exception:
            pass
        
        # In-sample accuracy, computed once and shared by plot and payload.
        rmse, mae, mape = compute_accuracy(ts_data, fitted_values)
        
        # --- Visualization (failure here must not abort the analysis) ---
        try:
            fig = plt.figure(figsize=(16, 12))
            gs = fig.add_gridspec(3, 3, hspace=0.3, wspace=0.3)
            
            # 1. Fit and forecast (wide top panel).
            ax1 = fig.add_subplot(gs[0, :])
            ax1.plot(ts_data, label='原始数据', linewidth=2, alpha=0.8, color='blue')
            ax1.plot(fitted_values, label='模型拟合', linewidth=2, alpha=0.8, color='red')
            
            forecast_index = range(len(ts_data), len(ts_data) + forecast_steps)
            ax1.plot(forecast_index, forecast_result, 
                    label=f'预测({forecast_steps}步)', linewidth=2, linestyle='--', color='green')
            
            if len(forecast_ci) > 0:
                ax1.fill_between(forecast_index, 
                               forecast_ci[:, 0], forecast_ci[:, 1],
                               alpha=0.3, label='95%置信区间', color='green')
            
            ax1.set_title(f'智能自动选择的最优模型: {model_name}\n平稳性: {stationarity_msg}', fontsize=14)
            ax1.set_xlabel('时间')
            ax1.set_ylabel('值')
            ax1.legend()
            ax1.grid(True, alpha=0.3)
            
            # 2. Residual series.
            ax2 = fig.add_subplot(gs[1, 0])
            ax2.plot(residuals, linewidth=1.5, color='red', alpha=0.8)
            ax2.axhline(y=0, color='black', linestyle='--', alpha=0.5)
            ax2.set_title('残差序列')
            ax2.set_xlabel('时间')
            ax2.set_ylabel('残差')
            ax2.grid(True, alpha=0.3)
            
            # 3. Residual Q-Q plot (histogram fallback).
            ax3 = fig.add_subplot(gs[1, 1])
            try:
                from scipy import stats
                stats.probplot(residuals, dist="norm", plot=ax3)
                ax3.set_title('残差Q-Q图')
                ax3.grid(True, alpha=0.3)
            except Exception:
                ax3.hist(residuals, bins=20, alpha=0.7, color='red', edgecolor='black')
                ax3.set_title('残差分布')
                ax3.set_xlabel('残差值')
                ax3.set_ylabel('频数')
                ax3.grid(True, alpha=0.3)
            
            # 4. Residual ACF with confidence band (fallback: raw autocorrelation).
            ax4 = fig.add_subplot(gs[1, 2])
            try:
                from statsmodels.tsa.stattools import acf, pacf
                
                lags = min(20, len(residuals) // 4)
                acf_vals = acf(residuals, nlags=lags, alpha=0.05)
                
                ax4.stem(range(len(acf_vals[0])), acf_vals[0], basefmt=" ")
                ax4.fill_between(range(len(acf_vals[0])), acf_vals[1][:, 0], acf_vals[1][:, 1], 
                               alpha=0.3, color='blue')
                ax4.set_title('残差ACF')
                ax4.set_xlabel('滞后期')
                ax4.set_ylabel('自相关系数')
                ax4.grid(True, alpha=0.3)
            except Exception:
                ax4.plot(np.correlate(residuals, residuals, mode='full')[len(residuals)-10:len(residuals)+10])
                ax4.set_title('残差自相关')
                ax4.grid(True, alpha=0.3)
            
            # 5. AIC comparison of the best candidates.
            ax5 = fig.add_subplot(gs[2, 0])
            if len(results) > 1:
                top_models = sorted(results, key=lambda x: x['aic'])[:min(8, len(results))]
                model_names = []
                aics = []
                
                for model_info in top_models:
                    name = f"ARIMA{model_info['order']}"
                    if model_info['seasonal_order']:
                        name += f"×{model_info['seasonal_order'][:3]}"
                    model_names.append(name)
                    aics.append(model_info['aic'])
                
                ax5.barh(range(len(model_names)), aics, alpha=0.7, 
                         color=['red' if i == 0 else 'lightblue' for i in range(len(aics))])
                ax5.set_yticks(range(len(model_names)))
                ax5.set_yticklabels(model_names, fontsize=9)
                ax5.set_xlabel('AIC值')
                ax5.set_title(f'模型比较 (前{len(model_names)}个)')
                ax5.grid(True, alpha=0.3)
                
                # Highlight the winner.
                ax5.text(aics[0], 0, f' 最优: {aics[0]:.2f}', va='center', fontweight='bold')
            else:
                ax5.text(0.5, 0.5, '只找到一个收敛模型', 
                        ha='center', va='center', transform=ax5.transAxes)
                ax5.set_title('模型比较')
            
            # 6. Text panel with the diagnostic summary.
            ax6 = fig.add_subplot(gs[2, 1:])
            ax6.axis('off')
            
            result_text = f"""
🎯 智能自动ARIMA模型选择结果

📊 最优模型: {model_name}
   AIC: {best_model.aic:.4f}
   BIC: {best_model.bic:.4f}
   对数似然: {best_model.llf:.4f}
   拟合方法: {best_model_info.get('method', 'lbfgs')}

🔍 搜索统计:
   评估模型数量: {len(results)}
   搜索时间: {time.time() - start_time:.2f}秒
   搜索范围: p≤{max_p}, d≤{max_d}, q≤{max_q}
   建议差分阶数: {suggested_d}
   季节性: {'是' if seasonal else '否'}
   {f'检测周期: {m}' if seasonal else ''}

📈 预测准确性:
   RMSE: {rmse:.4f}
   MAE: {mae:.4f}
   MAPE: {mape:.2f}%

🧪 模型诊断:
   {f'Ljung-Box检验 p值: {ljung_box_p:.4f}' if ljung_box_p is not None else 'Ljung-Box检验: 未计算'}
   {f'Jarque-Bera检验 p值: {jarque_bera_p:.4f}' if jarque_bera_p is not None else 'Jarque-Bera检验: 未计算'}
   {f'Durbin-Watson统计量: {durbin_watson:.4f}' if durbin_watson is not None else 'Durbin-Watson: 未计算'}
   残差标准差: {np.std(residuals):.4f}
            """
            
            ax6.text(0.05, 0.95, result_text, fontsize=10, verticalalignment='top',
                    bbox=dict(boxstyle="round,pad=0.5", facecolor="lightblue", alpha=0.8),
                    transform=ax6.transAxes, family='monospace')
            
            plt.tight_layout()
            plot_base64 = create_plot_base64(fig)
            
        except Exception as e:
            plot_base64 = None
            print(f"可视化创建失败: {e}")
        
        return {
            'best_model': model_name,
            'best_order': best_order,
            'best_seasonal_order': best_seasonal_order,
            'best_aic': float(best_aic),
            'best_bic': float(best_model.bic),
            'best_llf': float(best_model.llf),
            'fitting_method': best_model_info.get('method', 'lbfgs'),
            'converged': True,  # every surviving candidate passed the convergence checks
            
            # Automatic detection results.
            'stationarity_analysis': {
                'suggested_d': suggested_d,
                'stationarity_message': stationarity_msg,
                'detected_period': m if seasonal else None
            },
            
            # Search statistics.
            'search_statistics': {
                'search_time': round(time.time() - start_time, 2),
                'models_evaluated': len(results),
                'search_range': f'p≤{max_p}, d≤{max_d}, q≤{max_q}',
                'seasonal_enabled': seasonal
            },
            
            # In-sample accuracy metrics.
            'accuracy_metrics': {
                'rmse': float(rmse),
                'mae': float(mae),
                'mape': float(mape),
                'residual_std': float(np.std(residuals))
            },
            
            # Residual diagnostics (None when a test could not be computed).
            'diagnostics': {
                'ljung_box_p': float(ljung_box_p) if ljung_box_p is not None else None,
                'jarque_bera_p': float(jarque_bera_p) if jarque_bera_p is not None else None,
                'durbin_watson': float(durbin_watson) if durbin_watson is not None else None
            },
            
            # Top-5 candidates by AIC; the fitted model objects are stripped
            # because they are not JSON-serializable.
            'model_comparison': [
                {k: v for k, v in info.items() if k != 'model'}
                for info in sorted(results, key=lambda x: x['aic'])[:5]
            ],
            
            # Series data.
            'fitted_values': fitted_values.tolist(),
            'residuals': residuals.tolist(),
            'forecast': forecast_result.tolist(),
            'forecast_steps': forecast_steps,
            'forecast_ci_lower': forecast_ci[:, 0].tolist() if len(forecast_ci) > 0 else [],
            'forecast_ci_upper': forecast_ci[:, 1].tolist() if len(forecast_ci) > 0 else [],
            
            # Visualization (None when plotting failed).
            'visualization': plot_base64
        }
        
    except Exception as e:
        return {"error": f"自动ARIMA模型选择错误: {str(e)}"}

@mcp.tool()
def seasonal_decomposition(data: List[float], model: str = 'additive', period: Optional[int] = None, 
                          extrapolate_trend: str = 'freq') -> Dict[str, Any]:
    """
    Seasonal decomposition of a time series.

    Splits the series into trend, seasonal and residual components using
    statsmodels' classical decomposition, and renders a 4-panel plot.

    Args:
        data: Time series observations.
        model: Decomposition model ('additive' or 'multiplicative').
        period: Seasonal period; auto-detected from the series length when None.
        extrapolate_trend: Trend extrapolation method forwarded to seasonal_decompose.

    Returns:
        Dict with the decomposed components (NaN-stripped), trend/seasonal
        strength metrics and a base64-encoded plot, or {"error": ...} on failure.
    """
    try:
        if not STATSMODELS_AVAILABLE:
            return {"error": "statsmodels库未安装，无法进行季节性分解"}
        
        from statsmodels.tsa.seasonal import seasonal_decompose
        
        # Normalize input to a numpy array
        ts_data = np.array(data)
        
        # Auto-detect a reasonable period from the series length
        if period is None:
            if len(ts_data) >= 24:
                period = min(12, len(ts_data) // 2)
            else:
                period = max(2, len(ts_data) // 4)
        
        # Fix: validate inputs up front so callers get actionable messages
        # instead of opaque statsmodels exceptions. seasonal_decompose
        # requires at least two complete cycles of observations.
        if len(ts_data) < 2 * period:
            return {"error": f"数据长度({len(ts_data)})不足，季节性分解至少需要2个完整周期({2 * period}个数据点)"}
        # A multiplicative model is undefined for zero/negative values.
        if model == 'multiplicative' and np.any(ts_data <= 0):
            return {"error": "乘法模型要求所有数据为正值，请使用additive模型或检查数据"}
        
        # Perform the decomposition
        decomposition = seasonal_decompose(
            ts_data, 
            model=model, 
            period=period,
            extrapolate_trend=extrapolate_trend
        )
        
        # Build a 4-panel visualization: observed / trend / seasonal / residual
        fig, axes = plt.subplots(4, 1, figsize=(15, 12))
        fig.suptitle(f'时间序列{model}分解 (周期={period})', fontsize=16)
        
        # Observed series
        axes[0].plot(decomposition.observed, label='原始数据', linewidth=2)
        axes[0].set_title('原始时间序列')
        axes[0].legend()
        axes[0].grid(True, alpha=0.3)
        
        # Trend component
        axes[1].plot(decomposition.trend, label='趋势分量', color='orange', linewidth=2)
        axes[1].set_title('趋势分量')
        axes[1].legend()
        axes[1].grid(True, alpha=0.3)
        
        # Seasonal component
        axes[2].plot(decomposition.seasonal, label='季节性分量', color='green', linewidth=2)
        axes[2].set_title('季节性分量')
        axes[2].legend()
        axes[2].grid(True, alpha=0.3)
        
        # Residual component
        axes[3].plot(decomposition.resid, label='残差分量', color='red', linewidth=2)
        axes[3].set_title('残差分量')
        axes[3].legend()
        axes[3].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Strip the NaN padding that classical decomposition leaves at the
        # edges of the trend/residual series before computing metrics.
        trend_data = decomposition.trend[~np.isnan(decomposition.trend)]
        seasonal_data = decomposition.seasonal[~np.isnan(decomposition.seasonal)]
        resid_data = decomposition.resid[~np.isnan(decomposition.resid)]
        
        # Strength metrics per Hyndman: 1 - Var(resid) / Var(component + resid),
        # clamped at 0 so a noisy fit never reports negative strength.
        if len(trend_data) > 0 and len(resid_data) > 0:
            trend_strength = max(0, 1 - np.var(resid_data) / np.var(trend_data + resid_data))
        else:
            trend_strength = 0
            
        if len(seasonal_data) > 0 and len(resid_data) > 0:
            seasonal_strength = max(0, 1 - np.var(resid_data) / np.var(seasonal_data + resid_data))
        else:
            seasonal_strength = 0
        
        return {
            "decomposition_type": model,
            "period": period,
            "trend": trend_data.tolist(),
            "seasonal": seasonal_data.tolist(),
            "residual": resid_data.tolist(),
            "trend_strength": float(trend_strength),
            "seasonal_strength": float(seasonal_strength),
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"季节性分解错误: {str(e)}"}


@mcp.tool()
def time_series_classification(X_data: List[List[float]], y_data: List[int], 
                               method: str = 'intelligent_features', test_size: float = 0.3, 
                               random_state: int = 42, algorithm: str = 'auto_select',
                               auto_tune: bool = True, feature_selection: bool = True) -> Dict[str, Any]:
    """
    Intelligent enhanced time-series classification.

    Extracts features from fixed-length time series, optionally selects
    features and tunes hyperparameters, trains a classifier and reports
    accuracy, CV scores, ROC-AUC and a multi-panel visualization.

    Args:
        X_data: Time series feature data (n_samples x n_timesteps).
        y_data: Class labels.
        method: Feature extraction method ('statistical', 'intelligent_features',
                'dtw_features', 'raw', 'wavelet_features', 'ensemble_features',
                'enhanced_features').
        test_size: Test split fraction.
        random_state: Random seed.
        algorithm: Classifier ('random_forest', 'xgboost', 'svm', 'ensemble',
                   'auto_select').
        auto_tune: Whether to grid-search hyperparameters.
        feature_selection: Whether to apply SelectKBest feature selection.

    Returns:
        Dict of classification results, evaluation metrics and a base64 plot,
        or {"error": ...} on failure.
    """
    try:
        from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
        from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
        from sklearn.svm import SVC
        from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score
        from sklearn.preprocessing import StandardScaler, LabelEncoder
        from sklearn.feature_selection import SelectKBest, f_classif, RFE
        from sklearn.decomposition import PCA
        from scipy import stats
        from scipy.fft import fft
        import seaborn as sns
    except ImportError:
        return {"error": "scikit-learn库未安装，无法进行分类分析"}
    
    # --- Feature-extraction helpers -------------------------------------
    def extract_wavelet_features(series, wavelet='db4', levels=3):
        """Wavelet-decomposition statistics; falls back to FFT stats without pywt."""
        try:
            import pywt
            coeffs = pywt.wavedec(series, wavelet, level=levels)
            features = []
            for coeff in coeffs:
                if len(coeff) > 0:
                    features.extend([
                        np.mean(coeff), np.std(coeff), np.max(coeff), np.min(coeff),
                        np.median(coeff), stats.skew(coeff), stats.kurtosis(coeff)
                    ])
            return features
        except ImportError:
            # pywt unavailable: use the positive-frequency FFT magnitudes instead
            fft_vals = np.abs(fft(series))
            fft_vals = fft_vals[:len(fft_vals)//2]
            if len(fft_vals) > 0:
                return [
                    np.mean(fft_vals), np.std(fft_vals), np.max(fft_vals), np.min(fft_vals),
                    np.median(fft_vals), stats.skew(fft_vals), stats.kurtosis(fft_vals)
                ]
            return [0] * 7
    
    def extract_advanced_statistical_features(series):
        """Moment, distribution, difference and autocorrelation statistics."""
        features = []
        
        # Basic moments and quantiles
        features.extend([
            np.mean(series), np.std(series), np.var(series),
            np.max(series), np.min(series), np.median(series),
            np.percentile(series, 25), np.percentile(series, 75),
            stats.skew(series), stats.kurtosis(series)
        ])
        
        # Distribution shape
        features.extend([
            np.max(series) - np.min(series),  # range
            np.mean(np.abs(series - np.mean(series))),  # mean absolute deviation
            len(series[series > np.mean(series)]) / len(series),  # fraction above the mean
            np.sum(series > np.percentile(series, 75)) / len(series),  # fraction above Q3
            np.sum(series < np.percentile(series, 25)) / len(series),  # fraction below Q1
        ])
        
        # First-difference (trend / change) statistics
        diff_series = np.diff(series)
        if len(diff_series) > 0:
            features.extend([
                np.mean(diff_series), np.std(diff_series),
                np.sum(diff_series > 0) / len(diff_series),  # fraction of up-moves
                np.sum(np.abs(diff_series) > np.std(diff_series)) / len(diff_series),  # abnormal-change ratio
                np.max(diff_series), np.min(diff_series)
            ])
        else:
            features.extend([0] * 6)
        
        # Second-difference statistics
        if len(series) > 2:
            diff2_series = np.diff(series, 2)
            features.extend([
                np.mean(diff2_series), np.std(diff2_series),
                np.sum(np.abs(diff2_series)) / len(series),  # mean |second difference|
            ])
        else:
            features.extend([0] * 3)
        
        # Lag-1 / lag-5 autocorrelation (0 on failure, e.g. constant series)
        try:
            autocorr_1 = np.corrcoef(series[:-1], series[1:])[0, 1] if len(series) > 1 else 0
            autocorr_5 = np.corrcoef(series[:-5], series[5:])[0, 1] if len(series) > 5 else 0
            features.extend([autocorr_1, autocorr_5])
        except:
            features.extend([0, 0])
        
        return features
    
    def extract_frequency_features(series):
        """FFT-magnitude statistics plus spectral centroid and bandwidth."""
        try:
            fft_vals = np.abs(fft(series))
            fft_vals = fft_vals[:len(fft_vals)//2]  # keep positive frequencies only
            
            if len(fft_vals) > 0:
                # Basic spectral statistics and band energy ratios
                features = [
                    np.max(fft_vals), np.mean(fft_vals), np.std(fft_vals),
                    np.argmax(fft_vals),  # dominant frequency index
                    np.sum(fft_vals[:len(fft_vals)//4]) / np.sum(fft_vals),  # low-band energy ratio
                    np.sum(fft_vals[len(fft_vals)//4:len(fft_vals)//2]) / np.sum(fft_vals),  # mid-band energy ratio
                    np.sum(fft_vals[len(fft_vals)//2:]) / np.sum(fft_vals),  # high-band energy ratio
                ]
                
                # Spectral centroid and bandwidth
                freqs = np.arange(len(fft_vals))
                spectral_centroid = np.sum(freqs * fft_vals) / np.sum(fft_vals)
                spectral_bandwidth = np.sqrt(np.sum(((freqs - spectral_centroid) ** 2) * fft_vals) / np.sum(fft_vals))
                features.extend([spectral_centroid, spectral_bandwidth])
                
                return features
            else:
                return [0] * 9
        except:
            return [0] * 9
    
    def assess_data_characteristics(X, y):
        """Summarize sample counts, class balance and a rough complexity score."""
        n_samples, n_timepoints = X.shape
        n_classes = len(np.unique(y))
        
        # Fixed-length input, so average length == number of time points
        avg_length = n_timepoints
        
        # Class balance: smallest class size / largest class size
        class_counts = np.bincount(y)
        class_balance = np.min(class_counts) / np.max(class_counts)
        
        # Complexity: average normalized first-difference volatility
        # over at most 50 sample series.
        complexity_score = 0
        for series in X[:min(50, len(X))]:
            complexity_score += np.std(np.diff(series)) / (np.std(series) + 1e-8)
        complexity_score /= min(50, len(X))
        
        return {
            'n_samples': n_samples,
            'n_timepoints': n_timepoints,
            'n_classes': n_classes,
            'class_balance': class_balance,
            'complexity_score': complexity_score,
            'avg_length': avg_length
        }
    
    try:
        
        # Convert inputs to arrays
        X = np.array(X_data)
        y = np.array(y_data)
        
        # Encode labels to 0..k-1
        label_encoder = LabelEncoder()
        y_encoded = label_encoder.fit_transform(y)
        class_names = label_encoder.classes_
        
        # Assess data characteristics for algorithm selection / reporting
        data_chars = assess_data_characteristics(X, y_encoded)
        
        # --- Feature extraction ------------------------------------------
        if method == 'intelligent_features':
            # Combined statistical + frequency + wavelet features
            features = []
            for series in X:
                feature_vector = []
                
                # Advanced statistical features
                feature_vector.extend(extract_advanced_statistical_features(series))
                
                # Frequency-domain features
                feature_vector.extend(extract_frequency_features(series))
                
                # Wavelet features
                feature_vector.extend(extract_wavelet_features(series))
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'wavelet_features':
            # Wavelet-focused extraction plus a few basic statistics
            features = []
            for series in X:
                feature_vector = extract_wavelet_features(series, levels=4)
                feature_vector.extend([
                    np.mean(series), np.std(series), np.max(series), np.min(series),
                    stats.skew(series), stats.kurtosis(series)
                ])
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'ensemble_features':
            # Ensemble of extraction methods (statistical-heavy)
            features = []
            for series in X:
                feature_vector = []
                
                # Statistical features (dominant share)
                stat_features = extract_advanced_statistical_features(series)
                feature_vector.extend(stat_features)
                
                # Frequency-domain features
                freq_features = extract_frequency_features(series)
                feature_vector.extend(freq_features)
                
                # Wavelet features, truncated to keep dimensionality down
                wavelet_features = extract_wavelet_features(series, levels=2)
                feature_vector.extend(wavelet_features[:10])  # first 10 wavelet features only
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'enhanced_features':
            # Combined statistical + trend + frequency + shape features
            features = []
            for series in X:
                feature_vector = []
                
                # Basic statistics
                feature_vector.extend([
                    np.mean(series), np.std(series), np.var(series),
                    np.max(series), np.min(series), np.median(series),
                    np.percentile(series, 25), np.percentile(series, 75),
                    stats.skew(series), stats.kurtosis(series)
                ])
                
                # Trend / change features
                diff_series = np.diff(series)
                feature_vector.extend([
                    np.mean(diff_series), np.std(diff_series),
                    np.sum(diff_series > 0) / len(diff_series),  # fraction of up-moves
                    np.sum(np.abs(diff_series) > np.std(diff_series)) / len(diff_series),  # abnormal-change ratio
                    np.max(diff_series), np.min(diff_series)
                ])
                
                # Frequency-domain features
                try:
                    fft_vals = np.abs(fft(series))
                    fft_vals = fft_vals[:len(fft_vals)//2]  # positive frequencies only
                    if len(fft_vals) > 0:
                        feature_vector.extend([
                            np.max(fft_vals), np.mean(fft_vals), np.std(fft_vals),
                            np.argmax(fft_vals),  # dominant frequency index
                            np.sum(fft_vals[:len(fft_vals)//4]) / np.sum(fft_vals),  # low-band energy ratio
                        ])
                    else:
                        feature_vector.extend([0, 0, 0, 0, 0])
                except:
                    feature_vector.extend([0, 0, 0, 0, 0])
                
                # Shape / distribution features
                feature_vector.extend([
                    len(series),
                    np.max(series) - np.min(series),  # range
                    np.mean(np.abs(series - np.mean(series))),  # mean absolute deviation
                    len(series[series > np.mean(series)]) / len(series),  # fraction above the mean
                    np.sum(np.abs(np.diff(series, 2))) / len(series),  # mean |second difference|
                ])
                
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'statistical':
            # Basic statistical features only
            features = []
            for series in X:
                feature_vector = [
                    np.mean(series), np.std(series), np.min(series), np.max(series),
                    np.median(series), np.percentile(series, 25), np.percentile(series, 75),
                    len(series[series > np.mean(series)]) / len(series),
                    np.sum(np.diff(series) > 0) / (len(series) - 1),
                ]
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif method == 'dtw_features':
            # Distance-to-class-representative features (z-normalized euclidean
            # as a cheap DTW stand-in) plus a few statistics.
            def enhanced_dtw_distance(s1, s2):
                from scipy.spatial.distance import euclidean
                min_len = min(len(s1), len(s2))
                # z-normalize both series before comparing
                s1_norm = (s1[:min_len] - np.mean(s1[:min_len])) / (np.std(s1[:min_len]) + 1e-8)
                s2_norm = (s2[:min_len] - np.mean(s2[:min_len])) / (np.std(s2[:min_len]) + 1e-8)
                return euclidean(s1_norm, s2_norm)
            
            # Pick one representative (lowest-variance) series per class
            representative_indices = []
            for class_label in np.unique(y_encoded):
                class_indices = np.where(y_encoded == class_label)[0]
                if len(class_indices) > 0:
                    class_data = X[class_indices]
                    center_idx = class_indices[np.argmin([np.std(series) for series in class_data])]
                    representative_indices.append(center_idx)
            
            features = []
            for series in X:
                feature_vector = []
                for rep_idx in representative_indices:
                    dist = enhanced_dtw_distance(series, X[rep_idx])
                    feature_vector.append(dist)
                # Append basic statistics
                feature_vector.extend([
                    np.mean(series), np.std(series), np.max(series) - np.min(series),
                    stats.skew(series), stats.kurtosis(series)
                ])
                features.append(feature_vector)
            X_features = np.array(features)
            
        else:  # raw
            # Raw series, zero-padded to a common length
            max_len = max(len(series) for series in X)
            X_features = np.array([np.pad(series, (0, max_len - len(series)), 'constant') for series in X])
        
        # --- Feature selection -------------------------------------------
        if feature_selection and X_features.shape[1] > 20:
            # Keep between 10 and 50 features via univariate F-test scores
            k_features = min(max(10, X_features.shape[1] // 3), 50)
            selector = SelectKBest(score_func=f_classif, k=k_features)
            X_features = selector.fit_transform(X_features, y_encoded)
            selected_features = selector.get_support(indices=True)
        else:
            selected_features = None
        
        # Standardize features
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X_features)
        
        # Stratified train/test split
        X_train, X_test, y_train, y_test = train_test_split(
            X_scaled, y_encoded, test_size=test_size, random_state=random_state, stratify=y_encoded
        )
        
        # --- Model selection and training --------------------------------
        # Fix: always define selected_algorithm so the result dict never
        # raises NameError for explicit algorithm choices.
        selected_algorithm = algorithm
        
        if algorithm == 'auto_select':
            # Choose candidate algorithms from the data characteristics
            algorithms_to_try = []
            
            if data_chars['n_samples'] < 100:
                algorithms_to_try = ['svm', 'random_forest']
            elif data_chars['n_samples'] < 1000:
                algorithms_to_try = ['random_forest', 'xgboost', 'svm']
            else:
                algorithms_to_try = ['xgboost', 'random_forest', 'ensemble']
            
            # Prefer ensembles for imbalanced classes
            if data_chars['class_balance'] < 0.5:
                algorithms_to_try.insert(0, 'ensemble')
            
            best_score = 0
            best_algorithm = 'random_forest'
            best_classifier = None
            
            for alg in algorithms_to_try:
                try:
                    if alg == 'random_forest':
                        clf = RandomForestClassifier(
                            n_estimators=100, max_depth=10, min_samples_split=5,
                            random_state=random_state, n_jobs=-1
                        )
                    elif alg == 'xgboost':
                        try:
                            from xgboost import XGBClassifier
                            clf = XGBClassifier(
                                n_estimators=100, max_depth=6, learning_rate=0.1,
                                random_state=random_state, n_jobs=-1
                            )
                        except ImportError:
                            clf = GradientBoostingClassifier(
                                n_estimators=100, max_depth=6, learning_rate=0.1,
                                random_state=random_state
                            )
                    elif alg == 'svm':
                        clf = SVC(
                            kernel='rbf', C=1.0, gamma='scale', probability=True,
                            random_state=random_state
                        )
                    elif alg == 'ensemble':
                        rf = RandomForestClassifier(n_estimators=50, random_state=random_state)
                        gb = GradientBoostingClassifier(n_estimators=50, random_state=random_state)
                        svm = SVC(probability=True, random_state=random_state)
                        clf = VotingClassifier(
                            estimators=[('rf', rf), ('gb', gb), ('svm', svm)],
                            voting='soft'
                        )
                    
                    # Quick 3-fold CV to score the candidate
                    cv_score = np.mean(cross_val_score(clf, X_train, y_train, cv=3, scoring='accuracy'))
                    
                    if cv_score > best_score:
                        best_score = cv_score
                        best_algorithm = alg
                        best_classifier = clf
                        
                except Exception as e:
                    continue
            
            # Fix: fall back to a default classifier if every candidate failed,
            # instead of fitting None later.
            if best_classifier is None:
                best_algorithm = 'random_forest'
                best_classifier = RandomForestClassifier(n_estimators=100, random_state=random_state)
            
            classifier = best_classifier
            selected_algorithm = best_algorithm
            
        elif algorithm == 'random_forest':
            classifier = RandomForestClassifier(
                n_estimators=200, max_depth=15, min_samples_split=5,
                min_samples_leaf=2, random_state=random_state, n_jobs=-1
            )
        elif algorithm == 'xgboost':
            try:
                from xgboost import XGBClassifier
                classifier = XGBClassifier(
                    n_estimators=100, max_depth=6, learning_rate=0.1,
                    random_state=random_state, n_jobs=-1
                )
            except ImportError:
                classifier = GradientBoostingClassifier(
                    n_estimators=100, max_depth=6, learning_rate=0.1,
                    random_state=random_state
                )
        elif algorithm == 'svm':
            classifier = SVC(
                kernel='rbf', C=1.0, gamma='scale', probability=True,
                random_state=random_state
            )
        elif algorithm == 'ensemble':
            # Soft-voting ensemble of RF + GB + SVM
            rf = RandomForestClassifier(n_estimators=100, random_state=random_state)
            gb = GradientBoostingClassifier(n_estimators=100, random_state=random_state)
            svm = SVC(probability=True, random_state=random_state)
            classifier = VotingClassifier(
                estimators=[('rf', rf), ('gb', gb), ('svm', svm)],
                voting='soft'
            )
        else:
            # Unknown algorithm string: default to a random forest
            classifier = RandomForestClassifier(n_estimators=100, random_state=random_state)
        
        # --- Optional hyperparameter tuning ------------------------------
        if auto_tune and algorithm != 'auto_select':
            param_grids = {
                'random_forest': {
                    'n_estimators': [100, 200],
                    'max_depth': [10, 15, None],
                    'min_samples_split': [2, 5]
                },
                'svm': {
                    'C': [0.1, 1, 10],
                    'gamma': ['scale', 'auto']
                },
                'xgboost': {
                    'n_estimators': [100, 200],
                    'max_depth': [3, 6, 9],
                    'learning_rate': [0.1, 0.2]
                }
            }
            
            if algorithm in param_grids:
                try:
                    grid_search = GridSearchCV(
                        classifier, param_grids[algorithm], 
                        cv=3, scoring='accuracy', n_jobs=-1
                    )
                    grid_search.fit(X_train, y_train)
                    classifier = grid_search.best_estimator_
                    best_params = grid_search.best_params_
                except:
                    best_params = None
            else:
                best_params = None
        else:
            best_params = None
        
        # Fit the final model
        classifier.fit(X_train, y_train)
        
        # Predict labels and probabilities on the test split
        y_pred = classifier.predict(X_test)
        y_pred_proba = classifier.predict_proba(X_test)
        
        # 5-fold CV on the full dataset for a stability estimate
        cv_scores = cross_val_score(classifier, X_scaled, y_encoded, cv=5, scoring='accuracy')
        
        # Evaluation metrics
        accuracy = accuracy_score(y_test, y_pred)
        classification_rep = classification_report(y_test, y_pred, 
                                                 target_names=[str(name) for name in class_names],
                                                 output_dict=True, zero_division=0)
        conf_matrix = confusion_matrix(y_test, y_pred)
        
        # ROC-AUC (binary or one-vs-rest weighted for multiclass)
        try:
            if len(class_names) == 2:
                roc_auc = roc_auc_score(y_test, y_pred_proba[:, 1])
            else:
                roc_auc = roc_auc_score(y_test, y_pred_proba, multi_class='ovr', average='weighted')
        except:
            roc_auc = None
        
        # --- Visualization (3x3 panel) -----------------------------------
        fig, axes = plt.subplots(3, 3, figsize=(20, 15))
        algorithm_display = selected_algorithm if algorithm == 'auto_select' else algorithm
        fig.suptitle(f'智能时间序列分类结果 - 完善版\n方法: {method}, 算法: {algorithm_display}, 数据质量: {data_chars["class_balance"]:.2f}', fontsize=16)
        
        # Confusion matrix
        sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', 
                   xticklabels=class_names, yticklabels=class_names, ax=axes[0, 0])
        axes[0, 0].set_title('混淆矩阵')
        axes[0, 0].set_xlabel('预测标签')
        axes[0, 0].set_ylabel('真实标签')
        
        # Feature importances (if the model exposes them)
        if hasattr(classifier, 'feature_importances_'):
            feature_importance = classifier.feature_importances_
            top_indices = np.argsort(feature_importance)[-15:]
            axes[0, 1].barh(range(len(top_indices)), feature_importance[top_indices], color='skyblue')
            axes[0, 1].set_title('Top 15 特征重要性')
            axes[0, 1].set_xlabel('重要性')
            axes[0, 1].set_yticks(range(len(top_indices)))
            axes[0, 1].set_yticklabels([f'F{i}' for i in top_indices])
        else:
            axes[0, 1].text(0.5, 0.5, '该算法不支持\n特征重要性分析', 
                           ha='center', va='center', transform=axes[0, 1].transAxes)
            axes[0, 1].set_title('特征重要性')
        
        # Performance metric bars
        metrics = ['准确率', 'CV均值', 'CV标准差']
        values = [accuracy, np.mean(cv_scores), np.std(cv_scores)]
        if roc_auc is not None:
            metrics.append('ROC-AUC')
            values.append(roc_auc)
        
        bars = axes[0, 2].bar(metrics, values, color=['#3498db', '#e74c3c', '#f39c12', '#2ecc71'][:len(values)])
        axes[0, 2].set_title('性能指标')
        axes[0, 2].set_ylabel('分数')
        axes[0, 2].set_ylim(0, 1)
        axes[0, 2].tick_params(axis='x', rotation=45)
        
        for bar, value in zip(bars, values):
            axes[0, 2].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01, 
                           f'{value:.3f}', ha='center', va='bottom')
        
        # Class distribution pie chart
        unique, counts = np.unique(y_encoded, return_counts=True)
        class_labels = [class_names[i] for i in unique]
        axes[1, 0].pie(counts, labels=class_labels, autopct='%1.1f%%', startangle=90)
        axes[1, 0].set_title('类别分布')
        
        # Predicted-probability distributions per class
        for i, class_name in enumerate(class_names):
            axes[1, 1].hist(y_pred_proba[:, i], alpha=0.6, label=f'类别 {class_name}', bins=20)
        axes[1, 1].set_title('预测概率分布')
        axes[1, 1].set_xlabel('预测概率')
        axes[1, 1].set_ylabel('频次')
        axes[1, 1].legend()
        
        # Cross-validation scores with mean +/- std band
        axes[1, 2].plot(range(1, len(cv_scores) + 1), cv_scores, 'bo-', linewidth=2, markersize=8)
        axes[1, 2].axhline(y=np.mean(cv_scores), color='red', linestyle='--', 
                          label=f'均值: {np.mean(cv_scores):.3f}')
        axes[1, 2].fill_between(range(1, len(cv_scores) + 1), 
                               np.mean(cv_scores) - np.std(cv_scores),
                               np.mean(cv_scores) + np.std(cv_scores),
                               alpha=0.2, color='red')
        axes[1, 2].set_title('交叉验证分数')
        axes[1, 2].set_xlabel('折数')
        axes[1, 2].set_ylabel('准确率')
        axes[1, 2].legend()
        axes[1, 2].grid(True, alpha=0.3)
        
        # Data-characteristic bars (normalized for display only)
        data_metrics = ['样本数', '时间点数', '类别数', '类别平衡性', '复杂度']
        data_values = [
            data_chars['n_samples'], data_chars['n_timepoints'], 
            data_chars['n_classes'], data_chars['class_balance'], 
            data_chars['complexity_score']
        ]
        normalized_values = [v/max(data_values) for v in data_values]
        bars = axes[2, 0].bar(data_metrics, normalized_values, color=['#3498db', '#e74c3c', '#f39c12', '#2ecc71', '#9b59b6'])
        axes[2, 0].set_title('数据特征分析（标准化）')
        axes[2, 0].set_ylabel('标准化值')
        axes[2, 0].tick_params(axis='x', rotation=45)
        for bar, value in zip(bars, data_values):
            axes[2, 0].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01, 
                           f'{value:.2f}', ha='center', va='bottom', fontsize=8)
        
        # Selected-feature index histogram (if feature selection ran)
        if selected_features is not None:
            axes[2, 1].hist(selected_features, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
            axes[2, 1].set_title(f'选择的特征分布 (共{len(selected_features)}个)')
            axes[2, 1].set_xlabel('特征索引')
            axes[2, 1].set_ylabel('频次')
            axes[2, 1].grid(True, alpha=0.3)
        else:
            axes[2, 1].text(0.5, 0.5, f'未进行特征选择\n总特征数: {X_features.shape[1]}', 
                           ha='center', va='center', transform=axes[2, 1].transAxes)
            axes[2, 1].set_title('特征选择信息')
        
        # Textual analysis report panel
        analysis_text = f"""智能分析报告:
• 数据规模: {data_chars['n_samples']}样本 × {data_chars['n_timepoints']}时间点
• 分类任务: {data_chars['n_classes']}类别分类
• 类别平衡性: {data_chars['class_balance']:.3f}
• 数据复杂度: {data_chars['complexity_score']:.3f}
• 选择算法: {algorithm_display}
• 特征提取: {method}
• 模型准确率: {accuracy:.3f}
• 交叉验证: {np.mean(cv_scores):.3f}±{np.std(cv_scores):.3f}"""
        
        if best_params:
            analysis_text += f"\n• 最优参数: {best_params}"
        # Fix: explicit None check so a legitimate 0.0 AUC is still reported
        if roc_auc is not None:
            analysis_text += f"\n• ROC-AUC: {roc_auc:.3f}"
        
        axes[2, 2].text(0.05, 0.95, analysis_text, transform=axes[2, 2].transAxes, 
                        fontsize=10, verticalalignment='top', 
                        bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.8))
        axes[2, 2].set_title('智能分析报告')
        axes[2, 2].axis('off')
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        return {
            "method": method,
            "algorithm": algorithm,
            "selected_algorithm": selected_algorithm,
            "accuracy": float(accuracy),
            "cv_mean": float(np.mean(cv_scores)),
            "cv_std": float(np.std(cv_scores)),
            "roc_auc": float(roc_auc) if roc_auc is not None else None,
            "classification_report": classification_rep,
            "confusion_matrix": conf_matrix.tolist(),
            "predictions": y_pred.tolist(),
            "prediction_probabilities": y_pred_proba.tolist(),
            "feature_importance": classifier.feature_importances_.tolist() if hasattr(classifier, 'feature_importances_') else None,
            "class_names": class_names.tolist(),
            "cv_scores": cv_scores.tolist(),
            "best_params": best_params,
            "selected_features": selected_features.tolist() if selected_features is not None else None,
            "data_characteristics": data_chars,
            "intelligent_analysis": {
                "auto_selected_algorithm": selected_algorithm if algorithm == 'auto_select' else None,
                "feature_selection_applied": selected_features is not None,
                "n_selected_features": len(selected_features) if selected_features is not None else X_features.shape[1],
                "auto_tuning_applied": best_params is not None,
                "data_quality_score": data_chars['class_balance'],
                "complexity_assessment": "高" if data_chars['complexity_score'] > 1.0 else "中" if data_chars['complexity_score'] > 0.5 else "低",
                "recommended_improvements": [
                    "考虑增加更多训练样本" if data_chars['n_samples'] < 200 else None,
                    "建议处理类别不平衡" if data_chars['class_balance'] < 0.5 else None,
                    "可尝试更复杂的特征工程" if data_chars['complexity_score'] < 0.3 else None
                ]
            },
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"时间序列分类错误: {str(e)}"}


@mcp.tool()
def time_series_clustering(data: List[List[float]], n_clusters: int = 3, 
                          method: str = 'kmeans', distance_metric: str = 'euclidean',
                          feature_extraction: str = 'raw', auto_select_k: bool = False,
                          max_k: int = 10) -> Dict[str, Any]:
    """
    Enhanced time-series clustering analysis.

    Extracts features from each series, optionally auto-selects the cluster
    count, clusters with the chosen method/metric, computes internal validity
    indices, and renders a 6-panel diagnostic figure.

    Args:
        data: List of time series; each an equal-length list of floats.
        n_clusters: Number of clusters (ignored/overwritten by DBSCAN and by
            auto_select_k when it applies).
        method: Clustering method ('kmeans', 'hierarchical', 'dbscan',
            'gaussian_mixture').
        distance_metric: Distance metric ('euclidean', 'correlation', 'cosine').
            NOTE(review): 'dtw' is advertised by callers but not implemented
            here — it falls through to the euclidean branch; confirm intent.
        feature_extraction: Feature extraction ('raw', 'statistical',
            'fourier', 'wavelet').
        auto_select_k: Whether to pick the cluster count by silhouette score
            (only for 'kmeans' and 'gaussian_mixture').
        max_k: Upper bound on k tried during automatic selection.

    Returns:
        Dict with labels, per-cluster centers/stats, validity indices and a
        base64-encoded plot; {"error": ...} on failure.
    """
    try:
        from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
        from sklearn.mixture import GaussianMixture
        from sklearn.preprocessing import StandardScaler
        from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
        from sklearn.decomposition import PCA
        from scipy.spatial.distance import pdist, squareform
        from scipy.stats import skew, kurtosis
    except ImportError:
        return {"error": "scikit-learn库未安装，无法进行聚类分析"}
    
    try:
        
        # Convert input to a 2-D array: rows are series, columns are timepoints.
        X = np.array(data)
        n_samples, n_timepoints = X.shape
        
        # --- Feature extraction ---
        if feature_extraction == 'statistical':
            # Per-series statistical summary (location, spread, shape, trend).
            features = []
            for series in X:
                feature_vector = [
                    np.mean(series), np.std(series), np.min(series), np.max(series),
                    np.median(series), skew(series), kurtosis(series),
                    np.percentile(series, 25), np.percentile(series, 75),
                    np.sum(np.diff(series) > 0) / len(series)  # fraction of rising steps
                ]
                features.append(feature_vector)
            X_features = np.array(features)
            
        elif feature_extraction == 'fourier':
            # Fourier-magnitude features: first half of the spectrum
            # (the rest is the conjugate mirror for real input).
            features = []
            for series in X:
                fft_vals = np.fft.fft(series)
                # Magnitudes of the first half of the frequency components.
                fft_features = np.abs(fft_vals[:len(fft_vals)//2])
                # Normalize so each spectrum sums to 1.
                fft_features = fft_features / np.sum(fft_features)
                features.append(fft_features)
            X_features = np.array(features)
            
        elif feature_extraction == 'wavelet':
            # Simplified wavelet-like features: mean/std of the series
            # downsampled at a few dyadic scales (not a true wavelet transform).
            features = []
            for series in X:
                wavelet_features = []
                for scale in [2, 4, 8]:
                    if scale < len(series):
                        downsampled = series[::scale]
                        wavelet_features.extend([np.mean(downsampled), np.std(downsampled)])
                    else:
                        # Series shorter than the scale: fall back to whole-series stats.
                        wavelet_features.extend([np.mean(series), np.std(series)])
                features.append(wavelet_features)
            X_features = np.array(features)
            
        else:  # 'raw': cluster directly on the time-domain values
            X_features = X
        
        # --- Automatic selection of the cluster count (silhouette sweep) ---
        if auto_select_k and method in ['kmeans', 'gaussian_mixture']:
            silhouette_scores = []
            k_range = range(2, min(max_k + 1, n_samples))
            
            for k in k_range:
                if method == 'kmeans':
                    temp_clustering = KMeans(n_clusters=k, random_state=42, n_init=10)
                else:
                    temp_clustering = GaussianMixture(n_components=k, random_state=42)
                
                # NOTE(review): labels are fit on standardized features but the
                # silhouette below is scored on the unscaled X_features — confirm
                # this mismatch is intentional.
                temp_labels = temp_clustering.fit_predict(StandardScaler().fit_transform(X_features))
                if len(np.unique(temp_labels)) > 1:
                    score = silhouette_score(X_features, temp_labels)
                    silhouette_scores.append(score)
                else:
                    # Degenerate single-cluster result: mark as worst possible.
                    silhouette_scores.append(-1)
            
            if silhouette_scores:
                optimal_k = k_range[np.argmax(silhouette_scores)]
                n_clusters = optimal_k
        
        # --- Distance computation and clustering ---
        if distance_metric == 'correlation':
            # Correlation distance: 1 - |corr|, so strongly anti-correlated
            # series are treated as similar to strongly correlated ones.
            correlation_matrix = np.corrcoef(X_features)
            distance_matrix = 1 - np.abs(correlation_matrix)
            
            clustering = AgglomerativeClustering(
                n_clusters=n_clusters, 
                metric='precomputed', 
                linkage='average'
            )
            labels = clustering.fit_predict(distance_matrix)
            X_for_metrics = X_features
            
        elif distance_metric == 'cosine':
            # Cosine distance with average-linkage agglomerative clustering.
            from sklearn.metrics.pairwise import cosine_distances
            distance_matrix = cosine_distances(X_features)
            
            clustering = AgglomerativeClustering(
                n_clusters=n_clusters,
                metric='precomputed',
                linkage='average'
            )
            labels = clustering.fit_predict(distance_matrix)
            X_for_metrics = X_features
            
        else:  # 'euclidean' (and any unrecognized metric, e.g. 'dtw')
            # Standardize features so no dimension dominates the distance.
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X_features)
            
            if method == 'kmeans':
                clustering = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
                labels = clustering.fit_predict(X_scaled)
            elif method == 'hierarchical':
                clustering = AgglomerativeClustering(n_clusters=n_clusters)
                labels = clustering.fit_predict(X_scaled)
            elif method == 'dbscan':
                # NOTE(review): eps/min_samples are fixed; DBSCAN determines the
                # cluster count itself, so n_clusters is overwritten below
                # (label -1 marks noise points).
                clustering = DBSCAN(eps=0.5, min_samples=5)
                labels = clustering.fit_predict(X_scaled)
                n_clusters = len(np.unique(labels[labels != -1]))
            elif method == 'gaussian_mixture':
                clustering = GaussianMixture(n_components=n_clusters, random_state=42)
                labels = clustering.fit_predict(X_scaled)
            else:
                return {"error": f"不支持的聚类方法: {method}"}
            
            X_for_metrics = X_scaled
        
        # --- Internal validity indices ---
        try:
            unique_labels = np.unique(labels)
            if len(unique_labels) > 1 and -1 not in unique_labels:  # skip when noise points exist
                silhouette_avg = silhouette_score(X_for_metrics, labels)
                calinski_harabasz = calinski_harabasz_score(X_for_metrics, labels)
                davies_bouldin = davies_bouldin_score(X_for_metrics, labels)
            else:
                silhouette_avg = calinski_harabasz = davies_bouldin = None
        except:
            silhouette_avg = calinski_harabasz = davies_bouldin = None
        
        # --- Cluster centers and per-cluster statistics (on the raw series) ---
        cluster_centers = []
        cluster_info = []
        
        for i in range(n_clusters):
            cluster_mask = (labels == i)
            cluster_data = X[cluster_mask]
            
            if len(cluster_data) > 0:
                # Center = pointwise mean of the member series.
                center = np.mean(cluster_data, axis=0)
                cluster_centers.append(center)
                
                # Intra-cluster pairwise-distance statistics (cohesion).
                intra_distances = pdist(cluster_data)
                cluster_info.append({
                    'size': int(np.sum(cluster_mask)),
                    'mean_intra_distance': float(np.mean(intra_distances)) if len(intra_distances) > 0 else 0,
                    'std_intra_distance': float(np.std(intra_distances)) if len(intra_distances) > 0 else 0
                })
            else:
                # Empty cluster (possible with GMM/DBSCAN relabeling): zero placeholder.
                cluster_centers.append(np.zeros(n_timepoints))
                cluster_info.append({'size': 0, 'mean_intra_distance': 0, 'std_intra_distance': 0})
        
        # DBSCAN noise points (label -1).
        noise_count = np.sum(labels == -1) if method == 'dbscan' else 0
        
        # --- Visualization: 3x2 diagnostic panel ---
        fig, axes = plt.subplots(3, 2, figsize=(18, 15))
        fig.suptitle(f'增强时间序列聚类分析\n方法: {method}, 距离: {distance_metric}, 特征: {feature_extraction}', fontsize=16)
        
        # Panel (0,0): all member series per cluster, plus cluster centers.
        colors = plt.cm.Set3(np.linspace(0, 1, max(n_clusters, 3)))
        for i in range(n_clusters):
            cluster_data = X[labels == i]
            for series in cluster_data:
                axes[0, 0].plot(series, color=colors[i], alpha=0.3, linewidth=1)
            # Overlay the cluster center in a bold line.
            if len(cluster_data) > 0:
                axes[0, 0].plot(cluster_centers[i], color=colors[i], linewidth=3, 
                               label=f'聚类 {i} (n={cluster_info[i]["size"]})')
        
        # Noise points drawn in black (empty plot call only adds the legend entry).
        if noise_count > 0:
            noise_data = X[labels == -1]
            for series in noise_data:
                axes[0, 0].plot(series, color='black', alpha=0.3, linewidth=1)
            axes[0, 0].plot([], [], color='black', alpha=0.3, label=f'噪声点 (n={noise_count})')
        
        axes[0, 0].set_title('聚类结果')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # Panel (0,1): cluster centers only, for direct comparison.
        for i, center in enumerate(cluster_centers):
            axes[0, 1].plot(center, color=colors[i], linewidth=2, 
                           marker='o', label=f'聚类 {i}')
        axes[0, 1].set_title('聚类中心对比')
        axes[0, 1].legend()
        axes[0, 1].grid(True, alpha=0.3)
        
        # Panel (1,0): cluster size distribution (noise excluded, shown separately).
        unique, counts = np.unique(labels[labels != -1], return_counts=True)
        bars = axes[1, 0].bar(unique, counts, color=colors[:len(unique)])
        if noise_count > 0:
            axes[1, 0].bar([-1], [noise_count], color='black', alpha=0.7, label='噪声点')
        axes[1, 0].set_title('聚类分布')
        axes[1, 0].set_xlabel('聚类标签')
        axes[1, 0].set_ylabel('样本数量')
        
        # Panel (1,1): 2-D PCA projection of the clustered feature space.
        if X_for_metrics.shape[1] > 2:
            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X_for_metrics)
            
            for i in range(n_clusters):
                cluster_mask = (labels == i)
                if np.sum(cluster_mask) > 0:
                    axes[1, 1].scatter(X_pca[cluster_mask, 0], X_pca[cluster_mask, 1], 
                                     c=colors[i], label=f'聚类 {i}', alpha=0.7)
            
            if noise_count > 0:
                noise_mask = (labels == -1)
                axes[1, 1].scatter(X_pca[noise_mask, 0], X_pca[noise_mask, 1], 
                                 c='black', label='噪声点', alpha=0.7, marker='x')
            
            axes[1, 1].set_title(f'PCA可视化 (解释方差: {pca.explained_variance_ratio_.sum():.3f})')
            axes[1, 1].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.3f})')
            axes[1, 1].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.3f})')
            axes[1, 1].legend()
            axes[1, 1].grid(True, alpha=0.3)
        else:
            axes[1, 1].text(0.5, 0.5, '特征维度≤2\n无需PCA降维', 
                           transform=axes[1, 1].transAxes, ha='center', va='center')
            axes[1, 1].set_title('PCA可视化')
        
        # Panel (2,0): text box of validity indices and run summary.
        metrics_text = f"聚类评估指标:\n\n"
        if silhouette_avg is not None:
            metrics_text += f"轮廓系数: {silhouette_avg:.3f}\n"
        if calinski_harabasz is not None:
            metrics_text += f"Calinski-Harabasz: {calinski_harabasz:.3f}\n"
        if davies_bouldin is not None:
            metrics_text += f"Davies-Bouldin: {davies_bouldin:.3f}\n"
        metrics_text += f"\n聚类数量: {n_clusters}\n"
        metrics_text += f"样本总数: {n_samples}\n"
        if noise_count > 0:
            metrics_text += f"噪声点: {noise_count}\n"
        metrics_text += f"特征维度: {X_for_metrics.shape[1]}"
        
        axes[2, 0].text(0.1, 0.5, metrics_text, transform=axes[2, 0].transAxes, 
                        fontsize=12, verticalalignment='center',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        axes[2, 0].set_title('聚类评估指标')
        axes[2, 0].axis('off')
        
        # Panel (2,1): mean intra-cluster distance per cluster (cohesion bar chart).
        if len(cluster_info) > 0:
            cluster_ids = list(range(n_clusters))
            intra_distances = [info['mean_intra_distance'] for info in cluster_info]
            
            bars = axes[2, 1].bar(cluster_ids, intra_distances, color=colors[:n_clusters])
            axes[2, 1].set_title('聚类内平均距离')
            axes[2, 1].set_xlabel('聚类ID')
            axes[2, 1].set_ylabel('平均距离')
            axes[2, 1].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        return {
            "method": method,
            "distance_metric": distance_metric,
            "feature_extraction": feature_extraction,
            "n_clusters": n_clusters,
            "auto_selected_k": auto_select_k,
            "labels": labels.tolist(),
            "cluster_centers": [center.tolist() for center in cluster_centers],
            "cluster_info": cluster_info,
            "noise_count": noise_count,
            "silhouette_score": float(silhouette_avg) if silhouette_avg is not None else None,
            "calinski_harabasz_score": float(calinski_harabasz) if calinski_harabasz is not None else None,
            "davies_bouldin_score": float(davies_bouldin) if davies_bouldin is not None else None,
            "feature_dimensions": X_for_metrics.shape[1],
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"时间序列聚类错误: {str(e)}"}


def detect_seasonality(data):
    """Detect the dominant seasonal period of a time series.

    Strategy: look for the strongest spectral peak of the mean-removed
    series via FFT; if that yields no plausible period, fall back to the
    first significant autocorrelation peak; otherwise use a heuristic
    default of min(24, n // 4).

    Args:
        data: 1-D array-like of numeric observations.

    Returns:
        Estimated period as an int (constrained to [3, n // 3] so at
        least three full cycles fit the series), the heuristic default
        when no plausible peak is found, or None when fewer than 20
        points are supplied or the input is unusable.
    """
    try:
        from scipy.fft import fft
        from scipy.signal import find_peaks

        series = np.asarray(data, dtype=float)
        n = len(series)
        if n < 20:
            return None

        # Spectral method: the largest FFT-magnitude peak of the
        # mean-removed series marks the dominant frequency.
        fft_vals = np.abs(fft(series - np.mean(series)))
        freqs = np.fft.fftfreq(n)

        # Only the first half of the spectrum is informative for real
        # input (the rest mirrors it); require peaks above 10% of the max.
        peaks, _ = find_peaks(fft_vals[:n // 2], height=np.max(fft_vals) * 0.1)

        if len(peaks) > 0:
            main_freq_idx = peaks[np.argmax(fft_vals[peaks])]
            if freqs[main_freq_idx] > 0:
                period = int(1 / freqs[main_freq_idx])
                # Reject implausible periods: too short, or so long that
                # fewer than three full cycles fit in the data.
                if 3 <= period <= n // 3:
                    return period

        # Fallback: first significant autocorrelation peak (lag > 0).
        autocorr = np.correlate(series, series, mode='full')
        autocorr = autocorr[autocorr.size // 2:]

        peaks, _ = find_peaks(autocorr[1:], height=np.max(autocorr) * 0.3)
        if len(peaks) > 0:
            period = int(peaks[0] + 1)  # +1: peaks were found on lags 1..n-1
            if 3 <= period <= n // 3:
                return period

        return min(24, n // 4)  # heuristic default
    except Exception:
        # Bug fix: the original fallback referenced `n`, which is undefined
        # when the failure happens before `n = len(data)` executes, turning
        # the handler itself into a NameError. Recompute defensively.
        try:
            return min(24, len(data) // 4)
        except Exception:
            return None


def estimate_contamination_rate(data):
    """Estimate the fraction of outliers in a 1-D sample.

    Combines two robust estimates — points outside the Tukey 1.5*IQR
    fences, and points with a modified Z-score (median/MAD) above 3.5 —
    then averages them and clamps the result to [0.01, 0.3] so
    downstream detectors always receive a sane contamination parameter.

    Args:
        data: 1-D array-like of numeric observations.

    Returns:
        Estimated contamination rate in [0.01, 0.3]; 0.1 as a
        conservative default when the estimate cannot be computed
        (e.g. empty input).
    """
    try:
        values = np.asarray(data, dtype=float)

        # Estimate 1: Tukey fences — observations beyond 1.5*IQR from
        # the quartiles are counted as outliers.
        q1, q3 = np.percentile(values, [25, 75])
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr

        outliers_iqr = np.sum((values < lower_bound) | (values > upper_bound))
        contamination_iqr = outliers_iqr / len(values)

        # Estimate 2: modified Z-score. 0.6745 rescales the MAD to the
        # normal standard deviation; 3.5 is the conventional cutoff.
        # The 1e-8 term guards against division by zero when MAD == 0.
        median = np.median(values)
        mad = np.median(np.abs(values - median))
        modified_z_scores = 0.6745 * (values - median) / (mad + 1e-8)
        outliers_z = np.sum(np.abs(modified_z_scores) > 3.5)
        contamination_z = outliers_z / len(values)

        # Average the two estimates and clamp to a sensible range.
        estimated_contamination = (contamination_iqr + contamination_z) / 2
        return max(0.01, min(0.3, estimated_contamination))
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); fall back to a mild default.
        return 0.1


def assess_data_quality(data):
    """Compute simple quality metrics and an overall score for a series.

    Metrics: fraction of NaN values, fraction of infinite values, whether
    the variance is (near-)zero, absolute skewness and kurtosis, and the
    estimated outlier ratio. The overall score starts at 1.0 and is
    penalized for each detected problem.

    Args:
        data: 1-D array-like of numeric observations.

    Returns:
        Dict of quality metrics including 'overall_score' in [0, 1]; a
        fallback dict with score 0.5 when assessment fails.
    """
    try:
        # Bug fix: the original called `stats.skew` / `stats.kurtosis`,
        # but no `stats` name is in scope at module level (only specific
        # functions are imported from scipy.stats), so every call raised
        # NameError and this function always returned the fallback dict.
        from scipy.stats import kurtosis, skew

        values = np.asarray(data, dtype=float)
        quality_metrics = {
            'missing_values': np.sum(np.isnan(values)) / len(values),
            'infinite_values': np.sum(np.isinf(values)) / len(values),
            'zero_variance': np.var(values) < 1e-10,
            'skewness': abs(skew(values)),
            'kurtosis': abs(kurtosis(values)),
            'outlier_ratio': estimate_contamination_rate(values)
        }

        # Penalize the overall score for each quality issue found.
        quality_score = 1.0
        if quality_metrics['missing_values'] > 0.1:
            quality_score -= 0.3  # substantial missing data
        if quality_metrics['infinite_values'] > 0:
            quality_score -= 0.2  # any infinities at all
        if quality_metrics['zero_variance']:
            quality_score -= 0.4  # constant series carries no signal
        if quality_metrics['skewness'] > 2:
            quality_score -= 0.1  # heavily skewed distribution
        if quality_metrics['outlier_ratio'] > 0.2:
            quality_score -= 0.1  # many outliers

        quality_metrics['overall_score'] = max(0, quality_score)
        return quality_metrics
    except Exception:
        # Narrowed from a bare `except:`; keep the original fallback payload.
        return {'overall_score': 0.5, 'error': 'Unable to assess data quality'}


@mcp.tool()
def anomaly_detection(data: List[float], method: str = 'isolation_forest', 
                     contamination: float = 0.1, window_size: int = 10,
                     seasonal_period: int = None, sensitivity: float = 2.5,
                     auto_tune: bool = True, confidence_level: float = 0.95) -> Dict[str, Any]:
    """
    智能增强时间序列异常检测 - 完善版
    
    Args:
        data: 时间序列数据
        method: 异常检测方法 ('isolation_forest', 'statistical', 'moving_average', 'seasonal', 'ensemble', 'lstm_autoencoder', 'robust_covariance')
        contamination: 异常值比例
        window_size: 滑动窗口大小
        seasonal_period: 季节性周期（用于季节性异常检测）
        sensitivity: 敏感度参数（用于统计方法）
        auto_tune: 是否自动调优参数
        confidence_level: 置信水平
    
    Returns:
        异常检测结果和可视化
    """
    try:
        ts_data = np.array(data)
        n_points = len(ts_data)
        
        # 数据预处理和验证
        if n_points < 10:
            return {"error": "数据点数量过少，至少需要10个数据点"}
        
        # 智能参数调优
        if auto_tune:
            # 自动调整窗口大小
            if window_size is None or window_size <= 0:
                window_size = min(max(10, n_points // 20), 50)
            
            # 自动检测季节性周期
            if seasonal_period is None and method in ['seasonal', 'ensemble']:
                seasonal_period = detect_seasonality(ts_data)
            
            # 自动调整污染率
            if contamination == 0.1:  # 默认值
                contamination = estimate_contamination_rate(ts_data)
        
        # 数据质量检查
        data_quality = assess_data_quality(ts_data)
        
        if method == 'isolation_forest':
            try:
                from sklearn.ensemble import IsolationForest
                from scipy.stats import skew, kurtosis
            except ImportError:
                return {"error": "scikit-learn库未安装，无法使用Isolation Forest"}
            
            # 创建增强特征矩阵
            features = []
            for i in range(window_size, n_points):
                window = ts_data[i-window_size:i]
                feature_vector = [
                    np.mean(window),
                    np.std(window),
                    np.min(window),
                    np.max(window),
                    ts_data[i],
                    ts_data[i] - np.mean(window),  # 与窗口均值的差异
                    (ts_data[i] - np.mean(window)) / (np.std(window) + 1e-8),  # 标准化差异
                    skew(window),  # 偏度
                    kurtosis(window),  # 峰度
                    np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0,  # 上升趋势比例
                ]
                features.append(feature_vector)
            
            X = np.array(features)
            
            # 训练Isolation Forest
            clf = IsolationForest(contamination=contamination, random_state=42, n_estimators=200)
            anomaly_labels = clf.fit_predict(X)
            
            # 创建完整的异常标签数组
            full_anomaly_labels = np.ones(n_points)
            full_anomaly_labels[window_size:] = anomaly_labels
            
            anomaly_scores = np.ones(n_points)
            anomaly_scores[window_size:] = clf.decision_function(X)
            
        elif method == 'statistical':
            # 增强的统计异常检测
            # 使用修正的Z-score（基于中位数）
            median = np.median(ts_data)
            mad = np.median(np.abs(ts_data - median))  # 中位数绝对偏差
            modified_z_scores = 0.6745 * (ts_data - median) / (mad + 1e-8)
            
            # 结合标准Z-score
            z_scores = np.abs((ts_data - np.mean(ts_data)) / (np.std(ts_data) + 1e-8))
            
            # 综合异常分数
            combined_scores = np.maximum(np.abs(modified_z_scores), z_scores)
            
            full_anomaly_labels = np.where(combined_scores > sensitivity, -1, 1)
            anomaly_scores = -combined_scores  # 负值表示更异常
            
        elif method == 'moving_average':
            # 增强的移动平均异常检测
            # 使用指数移动平均
            alpha = 2.0 / (window_size + 1)
            ema = np.zeros(n_points)
            ema[0] = ts_data[0]
            
            for i in range(1, n_points):
                ema[i] = alpha * ts_data[i] + (1 - alpha) * ema[i-1]
            
            # 计算移动标准差
            moving_std = np.array([np.std(ts_data[max(0, i-window_size//2):min(n_points, i+window_size//2+1)]) 
                                  for i in range(n_points)])
            
            # 计算偏差
            deviations = np.abs(ts_data - ema)
            threshold = sensitivity * moving_std
            
            full_anomaly_labels = np.where(deviations > threshold, -1, 1)
            anomaly_scores = -deviations / (moving_std + 1e-8)  # 标准化偏差
            
        elif method == 'seasonal':
            # 季节性异常检测
            if seasonal_period is None:
                seasonal_period = min(24, n_points // 4)  # 默认季节周期
            
            if seasonal_period >= n_points:
                return {"error": "季节周期过大，无法进行季节性异常检测"}
            
            # 计算季节性基线
            seasonal_baseline = np.zeros(n_points)
            seasonal_std = np.zeros(n_points)
            
            for i in range(n_points):
                # 获取同一季节位置的历史数据
                season_indices = list(range(i % seasonal_period, n_points, seasonal_period))
                season_data = np.array([ts_data[idx] for idx in season_indices])
                
                seasonal_baseline[i] = np.median(season_data)
                seasonal_std[i] = np.std(season_data) if len(season_data) > 1 else np.std(ts_data)
            
            # 计算季节性偏差
            seasonal_deviations = np.abs(ts_data - seasonal_baseline)
            threshold = sensitivity * seasonal_std
            
            full_anomaly_labels = np.where(seasonal_deviations > threshold, -1, 1)
            anomaly_scores = -seasonal_deviations / (seasonal_std + 1e-8)
            
        elif method == 'robust_covariance':
            # 鲁棒协方差异常检测
            try:
                from sklearn.covariance import EllipticEnvelope
                
                # 创建特征矩阵
                features = []
                for i in range(window_size, n_points):
                    window = ts_data[i-window_size:i]
                    feature_vector = [
                        np.mean(window),
                        np.std(window),
                        np.median(window),
                        np.percentile(window, 25),
                        np.percentile(window, 75),
                        ts_data[i],
                        ts_data[i] - np.mean(window),
                        np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0
                    ]
                    features.append(feature_vector)
                
                X = np.array(features)
                
                # 鲁棒协方差估计
                robust_cov = EllipticEnvelope(contamination=contamination, random_state=42)
                anomaly_labels = robust_cov.fit_predict(X)
                
                full_anomaly_labels = np.ones(n_points)
                full_anomaly_labels[window_size:] = anomaly_labels
                
                anomaly_scores = np.ones(n_points)
                anomaly_scores[window_size:] = robust_cov.decision_function(X)
                
            except ImportError:
                return {"error": "scikit-learn库未安装，无法使用鲁棒协方差方法"}
        
        elif method == 'lstm_autoencoder':
            # LSTM自编码器异常检测（简化版）
            try:
                # 创建序列数据
                sequences = []
                for i in range(window_size, n_points):
                    sequences.append(ts_data[i-window_size:i])
                
                sequences = np.array(sequences)
                
                # 简化的自编码器：使用PCA作为降维重构
                from sklearn.decomposition import PCA
                
                # 标准化
                scaler = StandardScaler()
                sequences_scaled = scaler.fit_transform(sequences)
                
                # PCA降维重构
                n_components = min(max(2, window_size // 3), sequences_scaled.shape[1] - 1)
                pca = PCA(n_components=n_components)
                sequences_reduced = pca.fit_transform(sequences_scaled)
                sequences_reconstructed = pca.inverse_transform(sequences_reduced)
                
                # 计算重构误差
                reconstruction_errors = np.mean((sequences_scaled - sequences_reconstructed) ** 2, axis=1)
                
                # 设置阈值
                threshold = np.percentile(reconstruction_errors, (1 - contamination) * 100)
                
                full_anomaly_labels = np.ones(n_points)
                full_anomaly_labels[window_size:] = np.where(reconstruction_errors > threshold, -1, 1)
                
                anomaly_scores = np.ones(n_points)
                anomaly_scores[window_size:] = -reconstruction_errors  # 负值表示更异常
                
            except Exception as e:
                return {"error": f"LSTM自编码器方法失败: {str(e)}"}
        
        elif method == 'ensemble':
            # 增强集成异常检测
            methods = ['statistical', 'moving_average']
            try:
                from sklearn.ensemble import IsolationForest
                methods.append('isolation_forest')
            except ImportError:
                pass
            
            try:
                from sklearn.covariance import EllipticEnvelope
                methods.append('robust_covariance')
            except ImportError:
                pass
            
            ensemble_scores = []
            ensemble_labels = []
            
            for sub_method in methods:
                # 直接调用子方法，避免递归
                if sub_method == 'statistical':
                    # 统计方法的实现
                    median = np.median(ts_data)
                    mad = np.median(np.abs(ts_data - median))
                    modified_z_scores = 0.6745 * (ts_data - median) / (mad + 1e-8)
                    z_scores = np.abs((ts_data - np.mean(ts_data)) / (np.std(ts_data) + 1e-8))
                    combined_scores = np.maximum(np.abs(modified_z_scores), z_scores)
                    sub_labels = np.where(combined_scores > sensitivity, -1, 1)
                    sub_scores = -combined_scores
                elif sub_method == 'moving_average':
                    # 移动平均方法的实现
                    alpha = 2.0 / (window_size + 1)
                    ema = np.zeros(n_points)
                    ema[0] = ts_data[0]
                    for i in range(1, n_points):
                        ema[i] = alpha * ts_data[i] + (1 - alpha) * ema[i-1]
                    moving_std = np.array([np.std(ts_data[max(0, i-window_size//2):min(n_points, i+window_size//2+1)]) 
                                          for i in range(n_points)])
                    deviations = np.abs(ts_data - ema)
                    threshold = sensitivity * moving_std
                    sub_labels = np.where(deviations > threshold, -1, 1)
                    sub_scores = -deviations / (moving_std + 1e-8)
                elif sub_method == 'isolation_forest':
                    # 隔离森林方法的实现
                    from sklearn.ensemble import IsolationForest
                    from scipy.stats import skew, kurtosis
                    features = []
                    for i in range(window_size, n_points):
                        window = ts_data[i-window_size:i]
                        feature_vector = [
                            np.mean(window), np.std(window), np.min(window), np.max(window),
                            ts_data[i], ts_data[i] - np.mean(window),
                            (ts_data[i] - np.mean(window)) / (np.std(window) + 1e-8),
                            skew(window), kurtosis(window),
                            np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0,
                        ]
                        features.append(feature_vector)
                    X = np.array(features)
                    clf = IsolationForest(contamination=contamination, random_state=42, n_estimators=200)
                    anomaly_labels_sub = clf.fit_predict(X)
                    sub_labels = np.ones(n_points)
                    sub_labels[window_size:] = anomaly_labels_sub
                    sub_scores = np.ones(n_points)
                    sub_scores[window_size:] = clf.decision_function(X)
                elif sub_method == 'robust_covariance':
                    # 鲁棒协方差方法的实现
                    from sklearn.covariance import EllipticEnvelope
                    features = []
                    for i in range(window_size, n_points):
                        window = ts_data[i-window_size:i]
                        feature_vector = [
                            np.mean(window), np.std(window), np.median(window),
                            np.percentile(window, 25), np.percentile(window, 75),
                            ts_data[i], ts_data[i] - np.mean(window),
                            np.sum(np.diff(window) > 0) / len(window) if len(window) > 1 else 0
                        ]
                        features.append(feature_vector)
                    X = np.array(features)
                    robust_cov = EllipticEnvelope(contamination=contamination, random_state=42)
                    anomaly_labels_sub = robust_cov.fit_predict(X)
                    sub_labels = np.ones(n_points)
                    sub_labels[window_size:] = anomaly_labels_sub
                    sub_scores = np.ones(n_points)
                    sub_scores[window_size:] = robust_cov.decision_function(X)
                
                ensemble_scores.append(sub_scores)
                ensemble_labels.append(sub_labels)
            
            if not ensemble_scores:
                return {"error": "集成方法中所有子方法都失败"}
            
            # 平均异常分数
            anomaly_scores = np.mean(ensemble_scores, axis=0)
            
            # 投票决定异常标签
            ensemble_labels = np.array(ensemble_labels)
            vote_counts = np.sum(ensemble_labels == -1, axis=0)
            full_anomaly_labels = np.where(vote_counts >= len(methods) // 2 + 1, -1, 1)
            
        else:
            return {"error": f"不支持的异常检测方法: {method}"}
        
        # 识别异常点
        anomaly_indices = np.where(full_anomaly_labels == -1)[0]
        normal_indices = np.where(full_anomaly_labels == 1)[0]
        
        # 计算异常点的严重程度
        if len(anomaly_indices) > 0:
            anomaly_severity = np.abs(anomaly_scores[anomaly_indices])
            severity_percentiles = np.percentile(anomaly_severity, [25, 50, 75, 90])
        else:
            severity_percentiles = np.array([0, 0, 0, 0])
        
        # 创建增强的可视化
        fig, axes = plt.subplots(3, 2, figsize=(20, 16))
        fig.suptitle(f'智能时间序列异常检测 - 完善版\n方法: {method} | 数据质量: {data_quality.get("overall_score", 0):.2f}', fontsize=16)
        
        # 原始数据和异常点
        axes[0, 0].plot(ts_data, label='原始数据', linewidth=2, alpha=0.8, color='blue')
        if len(anomaly_indices) > 0:
            # 根据严重程度着色异常点
            severity_colors = ['orange', 'red', 'darkred']
            for i, idx in enumerate(anomaly_indices):
                severity = np.abs(anomaly_scores[idx])
                if severity <= severity_percentiles[1]:  # 轻微异常
                    color = 'orange'
                    size = 30
                elif severity <= severity_percentiles[2]:  # 中等异常
                    color = 'red'
                    size = 50
                else:  # 严重异常
                    color = 'darkred'
                    size = 70
                
                axes[0, 0].scatter(idx, ts_data[idx], color=color, s=size, 
                                 alpha=0.8, zorder=5, edgecolors='black', linewidth=0.5)
            
            # 添加图例
            axes[0, 0].scatter([], [], color='orange', s=30, label='轻微异常', alpha=0.8)
            axes[0, 0].scatter([], [], color='red', s=50, label='中等异常', alpha=0.8)
            axes[0, 0].scatter([], [], color='darkred', s=70, label='严重异常', alpha=0.8)
        
        axes[0, 0].set_title(f'时间序列数据与异常点 (共{len(anomaly_indices)}个)')
        axes[0, 0].set_xlabel('时间')
        axes[0, 0].set_ylabel('值')
        axes[0, 0].legend()
        axes[0, 0].grid(True, alpha=0.3)
        
        # 异常分数时间序列
        axes[0, 1].plot(anomaly_scores, label='异常分数', linewidth=2, color='orange')
        threshold_line = np.percentile(anomaly_scores, (1-contamination)*100)
        axes[0, 1].axhline(y=threshold_line, color='red', linestyle='--', 
                          label=f'{contamination*100}%阈值 ({threshold_line:.3f})')
        axes[0, 1].fill_between(range(n_points), anomaly_scores, threshold_line, 
                               where=(anomaly_scores < threshold_line), 
                               color='red', alpha=0.3, label='异常区域')
        axes[0, 1].set_title('异常分数时间序列')
        axes[0, 1].set_xlabel('时间')
        axes[0, 1].set_ylabel('异常分数')
        axes[0, 1].legend()
        axes[0, 1].grid(True, alpha=0.3)
        
        # 异常分数分布
        axes[1, 0].hist(anomaly_scores, bins=50, alpha=0.7, color='skyblue', edgecolor='black')
        axes[1, 0].axvline(threshold_line, color='red', linestyle='--', linewidth=2, 
                          label=f'阈值 ({threshold_line:.3f})')
        axes[1, 0].set_title('异常分数分布')
        axes[1, 0].set_xlabel('异常分数')
        axes[1, 0].set_ylabel('频次')
        axes[1, 0].legend()
        axes[1, 0].grid(True, alpha=0.3)
        
        # 异常检测统计
        stats_data = {
            '正常点': len(normal_indices),
            '异常点': len(anomaly_indices)
        }
        
        bars = axes[1, 1].bar(stats_data.keys(), stats_data.values(), 
                             color=['green', 'red'], alpha=0.7)
        axes[1, 1].set_title('异常检测统计')
        axes[1, 1].set_ylabel('数量')
        
        # 数据质量评估可视化
        quality_metrics = ['缺失值', '无穷值', '偏度', '峰度', '异常率']
        quality_values = [
            data_quality.get('missing_values', 0),
            data_quality.get('infinite_values', 0),
            min(data_quality.get('skewness', 0) / 5, 1),  # 归一化
            min(data_quality.get('kurtosis', 0) / 10, 1),  # 归一化
            data_quality.get('outlier_ratio', 0)
        ]
        
        bars = axes[2, 0].bar(quality_metrics, quality_values, 
                             color=['blue', 'orange', 'green', 'purple', 'red'], alpha=0.7)
        axes[2, 0].set_title(f'数据质量评估 (总分: {data_quality.get("overall_score", 0):.2f})')
        axes[2, 0].set_ylabel('指标值')
        axes[2, 0].tick_params(axis='x', rotation=45)
        axes[2, 0].set_ylim(0, 1)
        
        # 智能分析结果
        analysis_text = f"""智能分析结果:

数据特征:
• 总数据点: {n_points}
• 检测到的季节周期: {seasonal_period if seasonal_period else '无'}
• 自动调优污染率: {contamination:.3f}
• 优化窗口大小: {window_size}

异常检测结果:
• 异常点数量: {len(anomaly_indices)}
• 异常比例: {len(anomaly_indices)/n_points*100:.2f}%
• 置信水平: {confidence_level*100:.1f}%

严重程度分布:
• 轻微异常: {np.sum(np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[1]) if len(anomaly_indices) > 0 else 0}
• 中等异常: {np.sum((np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[1]) & (np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[2])) if len(anomaly_indices) > 0 else 0}
• 严重异常: {np.sum(np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[2]) if len(anomaly_indices) > 0 else 0}

数据质量:
• 整体质量分数: {data_quality.get('overall_score', 0):.3f}
• 数据偏度: {data_quality.get('skewness', 0):.3f}
• 数据峰度: {data_quality.get('kurtosis', 0):.3f}"""
        
        axes[2, 1].text(0.05, 0.95, analysis_text, transform=axes[2, 1].transAxes, 
                        fontsize=10, verticalalignment='top',
                        bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue", alpha=0.8))
        axes[2, 1].set_title('智能分析报告')
        axes[2, 1].axis('off')
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # 异常点详细信息
        anomaly_details = []
        for idx in anomaly_indices:
            severity = np.abs(anomaly_scores[idx])
            if severity <= severity_percentiles[1]:
                level = '轻微'
            elif severity <= severity_percentiles[2]:
                level = '中等'
            else:
                level = '严重'
            
            anomaly_details.append({
                'index': int(idx),
                'value': float(ts_data[idx]),
                'score': float(anomaly_scores[idx]),
                'severity': level
            })
        
        return {
            "method": method,
            "total_points": n_points,
            "anomaly_count": len(anomaly_indices),
            "anomaly_ratio": float(len(anomaly_indices) / n_points),
            "anomaly_indices": [int(x) for x in anomaly_indices],
            "anomaly_scores": [float(x) for x in anomaly_scores],
            "anomaly_labels": [int(x) for x in full_anomaly_labels],
            "anomaly_details": anomaly_details,
            "severity_percentiles": [float(x) for x in severity_percentiles],
            "contamination": float(contamination),
            "window_size": int(window_size),
            "sensitivity": float(sensitivity),
            "threshold": float(threshold_line),
            "confidence_level": float(confidence_level),
            "auto_tuned": auto_tune,
            "detected_seasonal_period": seasonal_period,
            "data_quality": {
                "overall_score": float(data_quality.get('overall_score', 0)),
                "missing_values_ratio": float(data_quality.get('missing_values', 0)),
                "infinite_values_ratio": float(data_quality.get('infinite_values', 0)),
                "skewness": float(data_quality.get('skewness', 0)),
                "kurtosis": float(data_quality.get('kurtosis', 0)),
                "estimated_outlier_ratio": float(data_quality.get('outlier_ratio', 0))
            },
            "severity_distribution": {
                "mild": int(np.sum(np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[1]) if len(anomaly_indices) > 0 else 0),
                "moderate": int(np.sum((np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[1]) & (np.abs(anomaly_scores[anomaly_indices]) <= severity_percentiles[2])) if len(anomaly_indices) > 0 else 0),
                "severe": int(np.sum(np.abs(anomaly_scores[anomaly_indices]) > severity_percentiles[2]) if len(anomaly_indices) > 0 else 0)
            },
            "intelligent_analysis": {
                "auto_optimized_contamination": float(contamination),
                "auto_optimized_window_size": int(window_size),
                "seasonality_detected": seasonal_period is not None,
                "data_quality_score": float(data_quality.get('overall_score', 0)),
                "recommended_method": method,
                "confidence_score": float(min(1.0, data_quality.get('overall_score', 0) * (1 - len(anomaly_indices) / n_points * 2)))
            },
            "visualization": plot_base64
        }
        
    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        return {"error": f"异常检测错误: {str(e)}\n详细错误信息:\n{error_details}"}



@mcp.tool()
async def kmeans_clustering(data: List[List[float]], n_clusters: int = 3, 
                           max_iter: int = 300, random_state: int = 42) -> Dict[str, Any]:
    """K-means clustering with multi-metric evaluation and visualization.

    Args:
        data: Data points, one feature vector per row.
        n_clusters: Number of clusters (must be >= 2 so the internal
            validity indices are defined).
        max_iter: Maximum number of K-means iterations.
        random_state: Random seed for reproducible centroid initialization.

    Returns:
        Dict with cluster labels, centers (mapped back to the original,
        unscaled feature space), silhouette / Calinski-Harabasz /
        Davies-Bouldin scores, inertia, iteration count, a base64-encoded
        plot and a human-readable evaluation summary; or {"error": ...}.
    """
    try:
        # Defensive local imports: davies_bouldin_score and PCA are used below
        # but are not part of the sklearn imports visible at module level.
        from sklearn.metrics import davies_bouldin_score
        from sklearn.decomposition import PCA

        data = np.array(data)

        # silhouette/CH/DB indices are undefined for fewer than two clusters;
        # fail fast with a clear message instead of a cryptic sklearn error.
        if n_clusters < 2:
            return {"error": f"聚类数量({n_clusters})必须至少为2"}

        if len(data) < n_clusters:
            return {"error": f"数据点数量({len(data)})少于聚类数量({n_clusters})"}

        # Standardize features so no single dimension dominates the distances.
        scaler = StandardScaler()
        data_scaled = scaler.fit_transform(data)

        # Fit K-means on the standardized data.
        kmeans = KMeans(n_clusters=n_clusters, max_iter=max_iter, random_state=random_state, n_init=10)
        labels = kmeans.fit_predict(data_scaled)
        # Map the centroids back to the original feature scale for reporting.
        centers = scaler.inverse_transform(kmeans.cluster_centers_)

        # Internal validity indices, computed in the standardized space.
        silhouette_avg = silhouette_score(data_scaled, labels)
        calinski_harabasz = calinski_harabasz_score(data_scaled, labels)
        davies_bouldin = davies_bouldin_score(data_scaled, labels)
        inertia = kmeans.inertia_

        # --- Visualization --------------------------------------------------
        if data.shape[1] == 2:
            # 2-D data: plot directly in feature space.
            fig, axes = plt.subplots(2, 3, figsize=(18, 12))

            # Cluster assignments with centroids marked.
            scatter = axes[0, 0].scatter(data[:, 0], data[:, 1], c=labels, cmap='viridis', alpha=0.7)
            axes[0, 0].scatter(centers[:, 0], centers[:, 1], c='red', marker='x', s=200, linewidths=3)
            axes[0, 0].set_xlabel('特征 1')
            axes[0, 0].set_ylabel('特征 2')
            axes[0, 0].set_title(f'K-means聚类结果 (k={n_clusters})')
            plt.colorbar(scatter, ax=axes[0, 0])

            # Pairwise distances between cluster centers.
            from scipy.spatial.distance import pdist, squareform
            center_distances = squareform(pdist(centers))
            im = axes[0, 1].imshow(center_distances, cmap='viridis')
            axes[0, 1].set_title('聚类中心距离矩阵')
            axes[0, 1].set_xlabel('聚类中心')
            axes[0, 1].set_ylabel('聚类中心')
            plt.colorbar(im, ax=axes[0, 1])

            # Evaluation metrics side by side (CH divided by 1000 so all three
            # fit on one axis).
            metrics = ['轮廓系数', 'CH指数', 'DB指数']
            values = [silhouette_avg, calinski_harabasz/1000, davies_bouldin]
            axes[0, 2].bar(metrics, values, alpha=0.7)
            axes[0, 2].set_title('聚类评估指标')
            axes[0, 2].set_ylabel('指标值')

            # Distribution of point-to-own-centroid distances.
            intra_distances = []
            for i in range(n_clusters):
                cluster_points = data[labels == i]
                if len(cluster_points) > 1:
                    cluster_center = centers[i]
                    distances = np.linalg.norm(cluster_points - cluster_center, axis=1)
                    intra_distances.extend(distances)

            axes[1, 0].hist(intra_distances, bins=20, alpha=0.7, edgecolor='black')
            axes[1, 0].set_title('类内距离分布')
            axes[1, 0].set_xlabel('距离')
            axes[1, 0].set_ylabel('频次')

            # Cluster size distribution.
            unique, counts = np.unique(labels, return_counts=True)
            axes[1, 1].bar(unique, counts, alpha=0.7)
            axes[1, 1].set_title('各聚类大小分布')
            axes[1, 1].set_xlabel('聚类标签')
            axes[1, 1].set_ylabel('样本数量')

            # Annotated heatmap of the same inter-center distance matrix.
            sns.heatmap(center_distances, annot=True, fmt='.2f', cmap='viridis', ax=axes[1, 2])
            axes[1, 2].set_title('聚类中心距离热图')

        else:
            # High-dimensional data: project to 2-D with PCA for display only.
            pca = PCA(n_components=2)
            data_pca = pca.fit_transform(data_scaled)
            centers_pca = pca.transform(kmeans.cluster_centers_)

            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Cluster assignments in PCA space with projected centroids.
            scatter = axes[0, 0].scatter(data_pca[:, 0], data_pca[:, 1], c=labels, cmap='viridis', alpha=0.7)
            axes[0, 0].scatter(centers_pca[:, 0], centers_pca[:, 1], c='red', marker='x', s=200, linewidths=3)
            axes[0, 0].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            axes[0, 0].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')
            axes[0, 0].set_title(f'K-means聚类结果 (PCA降维, k={n_clusters})')
            plt.colorbar(scatter, ax=axes[0, 0])

            # Mean absolute PCA loading per original feature — a rough proxy
            # for how much each feature drives the projection.
            feature_importance = np.abs(pca.components_).mean(axis=0)
            axes[0, 1].bar(range(len(feature_importance)), feature_importance, alpha=0.7)
            axes[0, 1].set_title('特征重要性 (PCA成分)')
            axes[0, 1].set_xlabel('特征索引')
            axes[0, 1].set_ylabel('重要性')

            # Cluster size distribution.
            unique, counts = np.unique(labels, return_counts=True)
            axes[1, 0].bar(unique, counts, alpha=0.7)
            axes[1, 0].set_title('各聚类大小分布')
            axes[1, 0].set_xlabel('聚类标签')
            axes[1, 0].set_ylabel('样本数量')

            # Variance explained by each principal component.
            axes[1, 1].bar(range(len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_, alpha=0.7)
            axes[1, 1].set_title('PCA解释方差比')
            axes[1, 1].set_xlabel('主成分')
            axes[1, 1].set_ylabel('解释方差比')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        return {
            "labels": labels.tolist(),
            "cluster_centers": centers.tolist(),
            "n_clusters": n_clusters,
            "silhouette_score": float(silhouette_avg),
            "calinski_harabasz_score": float(calinski_harabasz),
            "davies_bouldin_score": float(davies_bouldin),
            "inertia": float(inertia),
            "n_iter": int(kmeans.n_iter_),
            "visualization": plot_base64,
            "evaluation_summary": {
                "silhouette_interpretation": f"轮廓系数: {silhouette_avg:.3f} (范围[-1,1], 越接近1越好)",
                "calinski_harabasz_interpretation": f"CH指数: {calinski_harabasz:.3f} (越大越好，表示类间分离度高且类内紧密度高)",
                "davies_bouldin_interpretation": f"DB指数: {davies_bouldin:.3f} (越小越好，表示类内距离小且类间距离大)",
                "inertia_interpretation": f"类内平方和: {inertia:.3f} (越小越好，表示类内紧密度高)",
                "clustering_quality": "优秀" if silhouette_avg > 0.7 else "良好" if silhouette_avg > 0.5 else "一般" if silhouette_avg > 0.25 else "较差"
            }
        }
        
    except Exception as e:
        return {"error": f"K-means聚类错误: {str(e)}"}

@mcp.tool()
async def silhouette_analysis_visualization(data: List[List[float]], labels: List[int]) -> Dict[str, Any]:
    """Silhouette analysis and visualization for a given cluster assignment.

    Args:
        data: Data points, one feature vector per row.
        labels: Cluster label per point. Labels need not be contiguous
            integers starting at 0 (e.g. DBSCAN output with -1 noise is
            accepted); at least two distinct labels are required.

    Returns:
        Dict with the average silhouette score, per-sample silhouette
        values, per-cluster statistics keyed by the actual cluster label,
        a base64 plot and a quality assessment; or {"error": ...}.
    """
    try:
        # Defensive local imports: silhouette_samples and PCA are not part of
        # the sklearn imports visible at module level.
        from sklearn.metrics import silhouette_samples
        from sklearn.decomposition import PCA

        data = np.array(data)
        labels = np.array(labels)

        # Iterate the actual labels present — `range(n_clusters)` would break
        # for non-contiguous label sets (missing bands, empty-slice errors).
        unique_labels = np.unique(labels)
        n_clusters = len(unique_labels)

        # The silhouette score is undefined for fewer than two clusters.
        if n_clusters < 2:
            return {"error": f"轮廓分析错误: 至少需要2个不同的聚类标签，当前只有{n_clusters}个"}

        # Standardize so every feature contributes comparably to distances.
        scaler = StandardScaler()
        data_scaled = scaler.fit_transform(data)

        # Overall and per-sample silhouette coefficients.
        silhouette_avg = silhouette_score(data_scaled, labels)
        sample_silhouette_values = silhouette_samples(data_scaled, labels)

        # For high-dimensional data, project to 2-D with PCA for the scatter plot.
        if data.shape[1] == 2:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
        else:
            pca = PCA(n_components=2)
            data_pca = pca.fit_transform(data_scaled)
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

        # Silhouette plot: one horizontal band per cluster, values sorted.
        y_lower = 10
        for rank, lab in enumerate(unique_labels):
            cluster_silhouette_values = sample_silhouette_values[labels == lab]
            cluster_silhouette_values.sort()

            size_cluster_i = cluster_silhouette_values.shape[0]
            y_upper = y_lower + size_cluster_i

            color = plt.cm.nipy_spectral(float(rank) / n_clusters)
            ax1.fill_betweenx(np.arange(y_lower, y_upper),
                             0, cluster_silhouette_values,
                             facecolor=color, edgecolor=color, alpha=0.7)

            ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(lab))
            y_lower = y_upper + 10  # gap between cluster bands

        ax1.set_xlabel('轮廓系数值')
        ax1.set_ylabel('聚类标签')
        ax1.set_title(f'各聚类的轮廓分析\n平均轮廓系数: {silhouette_avg:.3f}')

        # Reference line at the overall average silhouette score.
        ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
        ax1.set_yticks([])
        ax1.set_xlim([-0.1, 1])
        ax1.set_ylim([0, len(data) + (n_clusters + 1) * 10])

        # Scatter plot of the clustering itself (PCA space for >2 features).
        if data.shape[1] == 2:
            scatter = ax2.scatter(data[:, 0], data[:, 1], c=labels, cmap='nipy_spectral', alpha=0.7)
            ax2.set_xlabel('特征 1')
            ax2.set_ylabel('特征 2')
        else:
            scatter = ax2.scatter(data_pca[:, 0], data_pca[:, 1], c=labels, cmap='nipy_spectral', alpha=0.7)
            ax2.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            ax2.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')

        ax2.set_title(f'聚类结果可视化 (k={n_clusters})')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        # Per-cluster silhouette statistics keyed by the actual cluster label.
        cluster_silhouette_stats = {}
        for lab in unique_labels:
            cluster_values = sample_silhouette_values[labels == lab]
            cluster_silhouette_stats[int(lab)] = {
                "mean": float(np.mean(cluster_values)),
                "std": float(np.std(cluster_values)),
                "min": float(np.min(cluster_values)),
                "max": float(np.max(cluster_values)),
                "size": int(np.sum(labels == lab))
            }

        return {
            "silhouette_avg": float(silhouette_avg),
            "sample_silhouette_values": sample_silhouette_values.tolist(),
            "n_clusters": n_clusters,
            "cluster_silhouette_stats": cluster_silhouette_stats,
            "visualization": plot_base64,
            "quality_assessment": {
                "overall_quality": "优秀" if silhouette_avg > 0.7 else "良好" if silhouette_avg > 0.5 else "一般" if silhouette_avg > 0.25 else "较差",
                "interpretation": f"平均轮廓系数为 {silhouette_avg:.3f}，表示聚类质量{'优秀' if silhouette_avg > 0.7 else '良好' if silhouette_avg > 0.5 else '一般' if silhouette_avg > 0.25 else '较差'}",
                "recommendations": "轮廓系数接近1表示聚类效果很好，接近0表示聚类重叠，负值表示可能分类错误"
            }
        }
        
    except Exception as e:
        return {"error": f"轮廓分析错误: {str(e)}"}

@mcp.tool()
async def dbscan_clustering(data: List[List[float]], eps: float = 0.5, 
                           min_samples: int = 5) -> Dict[str, Any]:
    """DBSCAN density-based clustering with diagnostics and visualization.

    Args:
        data: Data points, one feature vector per row.
        eps: Neighborhood radius (in standardized feature space).
        min_samples: Minimum neighbors for a point to be a core point.

    Returns:
        Dict with labels (-1 marks noise), cluster/noise counts, core point
        indices, validity scores computed on non-noise points (None when not
        computable), a base64 plot, a clustering summary and parameter
        suggestions; or {"error": ...}.
    """
    try:
        # Defensive local imports: davies_bouldin_score and PCA are used below
        # but are not part of the sklearn imports visible at module level.
        from sklearn.metrics import davies_bouldin_score
        from sklearn.decomposition import PCA

        data = np.array(data)

        # Guard: empty input would divide by zero in the summary ratios below.
        if len(data) == 0:
            return {"error": "数据为空"}

        # Standardize so eps is comparable across features.
        scaler = StandardScaler()
        data_scaled = scaler.fit_transform(data)

        # DBSCAN clustering in standardized space.
        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
        labels = dbscan.fit_predict(data_scaled)

        # Cluster statistics; label -1 denotes noise, not a cluster.
        n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
        n_noise = list(labels).count(-1)

        # Validity indices computed on non-noise points only; they need at
        # least two clusters to be defined.
        if n_clusters > 1 and len(set(labels[labels != -1])) > 1:
            non_noise_mask = labels != -1
            if np.sum(non_noise_mask) > 1:
                silhouette_avg = silhouette_score(data_scaled[non_noise_mask], labels[non_noise_mask])
                calinski_harabasz = calinski_harabasz_score(data_scaled[non_noise_mask], labels[non_noise_mask])
                davies_bouldin = davies_bouldin_score(data_scaled[non_noise_mask], labels[non_noise_mask])
            else:
                silhouette_avg = 0
                calinski_harabasz = 0
                davies_bouldin = float('inf')
        else:
            # Sentinels meaning "not computable"; mapped to None in the result.
            silhouette_avg = 0
            calinski_harabasz = 0
            davies_bouldin = float('inf')

        # --- Visualization --------------------------------------------------
        if data.shape[1] == 2:
            # 2-D data: plot directly in feature space.
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Cluster assignments, noise drawn in black.
            unique_labels = set(labels)
            colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]

            for k, col in zip(unique_labels, colors):
                if k == -1:
                    # Noise points are rendered in black.
                    col = [0, 0, 0, 1]

                class_member_mask = (labels == k)
                xy = data[class_member_mask]
                axes[0, 0].plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                               markeredgecolor='k', markersize=6, alpha=0.7)

            axes[0, 0].set_xlabel('特征 1')
            axes[0, 0].set_ylabel('特征 2')
            axes[0, 0].set_title(f'DBSCAN聚类结果\n聚类数: {n_clusters}, 噪声点: {n_noise}')

            # Classify each point as core / border / noise.
            core_samples_mask = np.zeros_like(dbscan.labels_, dtype=bool)
            core_samples_mask[dbscan.core_sample_indices_] = True

            # Core points.
            axes[0, 1].scatter(data[core_samples_mask, 0], data[core_samples_mask, 1], 
                              c='red', marker='o', s=50, alpha=0.7, label='核心点')
            # Border points: clustered but not core.
            border_mask = (labels != -1) & (~core_samples_mask)
            axes[0, 1].scatter(data[border_mask, 0], data[border_mask, 1], 
                              c='blue', marker='s', s=50, alpha=0.7, label='边界点')
            # Noise points.
            noise_mask = labels == -1
            axes[0, 1].scatter(data[noise_mask, 0], data[noise_mask, 1], 
                              c='black', marker='x', s=50, alpha=0.7, label='噪声点')

            axes[0, 1].set_xlabel('特征 1')
            axes[0, 1].set_ylabel('特征 2')
            axes[0, 1].set_title('点类型分布')
            axes[0, 1].legend()

            # Core / border / noise counts.
            point_types = ['核心点', '边界点', '噪声点']
            counts = [len(dbscan.core_sample_indices_), np.sum(border_mask), n_noise]
            axes[1, 0].bar(point_types, counts, alpha=0.7, color=['red', 'blue', 'black'])
            axes[1, 0].set_title('点类型统计')
            axes[1, 0].set_ylabel('数量')

            # Cluster size distribution, noise excluded.
            if n_clusters > 0:
                cluster_labels = labels[labels != -1]
                unique, counts = np.unique(cluster_labels, return_counts=True)
                axes[1, 1].bar(unique, counts, alpha=0.7)
                axes[1, 1].set_title('各聚类大小分布（排除噪声点）')
                axes[1, 1].set_xlabel('聚类标签')
                axes[1, 1].set_ylabel('样本数量')
            else:
                axes[1, 1].text(0.5, 0.5, '未发现有效聚类', ha='center', va='center', transform=axes[1, 1].transAxes)
                axes[1, 1].set_title('聚类分布')

        else:
            # High-dimensional data: project to 2-D with PCA for display only.
            pca = PCA(n_components=2)
            data_pca = pca.fit_transform(data_scaled)

            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Cluster assignments in PCA space, noise in black.
            unique_labels = set(labels)
            colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]

            for k, col in zip(unique_labels, colors):
                if k == -1:
                    col = [0, 0, 0, 1]

                class_member_mask = (labels == k)
                xy = data_pca[class_member_mask]
                axes[0, 0].plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                               markeredgecolor='k', markersize=6, alpha=0.7)

            axes[0, 0].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            axes[0, 0].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')
            axes[0, 0].set_title(f'DBSCAN聚类结果 (PCA降维)\n聚类数: {n_clusters}, 噪声点: {n_noise}')

            # Classify each point as core / border / noise.
            core_samples_mask = np.zeros_like(dbscan.labels_, dtype=bool)
            core_samples_mask[dbscan.core_sample_indices_] = True

            axes[0, 1].scatter(data_pca[core_samples_mask, 0], data_pca[core_samples_mask, 1], 
                              c='red', marker='o', s=50, alpha=0.7, label='核心点')
            border_mask = (labels != -1) & (~core_samples_mask)
            axes[0, 1].scatter(data_pca[border_mask, 0], data_pca[border_mask, 1], 
                              c='blue', marker='s', s=50, alpha=0.7, label='边界点')
            noise_mask = labels == -1
            axes[0, 1].scatter(data_pca[noise_mask, 0], data_pca[noise_mask, 1], 
                              c='black', marker='x', s=50, alpha=0.7, label='噪声点')

            axes[0, 1].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            axes[0, 1].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')
            axes[0, 1].set_title('点类型分布 (PCA降维)')
            axes[0, 1].legend()

            # Core / border / noise counts.
            point_types = ['核心点', '边界点', '噪声点']
            counts = [len(dbscan.core_sample_indices_), np.sum(border_mask), n_noise]
            axes[1, 0].bar(point_types, counts, alpha=0.7, color=['red', 'blue', 'black'])
            axes[1, 0].set_title('点类型统计')
            axes[1, 0].set_ylabel('数量')

            # Variance explained by each principal component.
            axes[1, 1].bar(range(len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_, alpha=0.7)
            axes[1, 1].set_title('PCA解释方差比')
            axes[1, 1].set_xlabel('主成分')
            axes[1, 1].set_ylabel('解释方差比')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        return {
            "labels": labels.tolist(),
            "n_clusters": n_clusters,
            "n_noise": n_noise,
            "eps": eps,
            "min_samples": min_samples,
            "core_sample_indices": dbscan.core_sample_indices_.tolist(),
            "silhouette_score": float(silhouette_avg) if silhouette_avg != 0 else None,
            "calinski_harabasz_score": float(calinski_harabasz) if calinski_harabasz != 0 else None,
            "davies_bouldin_score": float(davies_bouldin) if davies_bouldin != float('inf') else None,
            "visualization": plot_base64,
            "clustering_summary": {
                "total_points": len(data),
                "core_points": len(dbscan.core_sample_indices_),
                "border_points": int(np.sum(border_mask)),
                "noise_points": n_noise,
                "clusters_found": n_clusters,
                "noise_ratio": f"{(n_noise/len(data)*100):.1f}%"
            },
            "evaluation_summary": {
                "clustering_quality": "优秀" if silhouette_avg > 0.7 else "良好" if silhouette_avg > 0.5 else "一般" if silhouette_avg > 0.25 else "较差" if silhouette_avg > 0 else "无法评估",
                "noise_assessment": "噪声较少" if n_noise/len(data) < 0.1 else "噪声适中" if n_noise/len(data) < 0.2 else "噪声较多",
                "parameter_suggestion": f"当前参数 eps={eps}, min_samples={min_samples}。如果噪声过多，可以减小eps或增加min_samples；如果聚类过少，可以增大eps或减小min_samples。"
            }
        }
        
    except Exception as e:
        return {"error": f"DBSCAN聚类错误: {str(e)}"}

@mcp.tool()
async def hierarchical_clustering(data: List[List[float]], n_clusters: int = 3,
                                  linkage: str = "ward") -> Dict[str, Any]:
    """Agglomerative (hierarchical) clustering with dendrogram visualization.

    Args:
        data: Data points, one feature vector per row.
        n_clusters: Number of clusters to cut the tree into (must be >= 2
            so the validity indices are defined).
        linkage: Linkage criterion ("ward", "complete", "average", "single").

    Returns:
        Dict with labels, linkage method, silhouette / Calinski-Harabasz /
        Davies-Bouldin scores, the scipy linkage matrix, per-cluster sizes,
        a base64 plot and an evaluation summary; or {"error": ...}.
    """
    try:
        # Defensive local imports: davies_bouldin_score and PCA are used below
        # but are not part of the sklearn imports visible at module level.
        from sklearn.metrics import davies_bouldin_score
        from sklearn.decomposition import PCA

        data = np.array(data)

        # silhouette/CH/DB indices are undefined for fewer than two clusters.
        if n_clusters < 2:
            return {"error": f"聚类数量({n_clusters})必须至少为2"}

        if len(data) < n_clusters:
            return {"error": f"数据点数量({len(data)})少于聚类数量({n_clusters})"}

        # Standardize so distances are comparable across features.
        scaler = StandardScaler()
        data_scaled = scaler.fit_transform(data)

        # Agglomerative clustering with the requested linkage.
        hierarchical = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage)
        labels = hierarchical.fit_predict(data_scaled)

        # Internal validity indices, computed in the standardized space.
        silhouette_avg = silhouette_score(data_scaled, labels)
        calinski_harabasz = calinski_harabasz_score(data_scaled, labels)
        davies_bouldin = davies_bouldin_score(data_scaled, labels)

        # scipy linkage matrix for the dendrogram (alias avoids shadowing the
        # `linkage` parameter).
        from scipy.cluster.hierarchy import dendrogram, linkage as scipy_linkage

        linkage_matrix = scipy_linkage(data_scaled, method=linkage)

        # --- Visualization --------------------------------------------------
        if data.shape[1] == 2:
            # 2-D data: plot directly in feature space.
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Cluster assignments.
            scatter = axes[0, 0].scatter(data[:, 0], data[:, 1], c=labels, cmap='viridis', alpha=0.7)
            axes[0, 0].set_xlabel('特征 1')
            axes[0, 0].set_ylabel('特征 2')
            axes[0, 0].set_title(f'层次聚类结果 (k={n_clusters}, {linkage})')
            plt.colorbar(scatter, ax=axes[0, 0])

            # Dendrogram, truncated to keep it readable for large inputs.
            dendrogram(linkage_matrix, ax=axes[0, 1], truncate_mode='level', p=10)
            axes[0, 1].set_title('聚类树状图')
            axes[0, 1].set_xlabel('样本索引')
            axes[0, 1].set_ylabel('距离')

            # Cluster size distribution.
            unique, counts = np.unique(labels, return_counts=True)
            axes[1, 0].bar(unique, counts, alpha=0.7)
            axes[1, 0].set_title('各聚类大小分布')
            axes[1, 0].set_xlabel('聚类标签')
            axes[1, 0].set_ylabel('样本数量')

            # Histogram of merge distances (third column of the linkage matrix).
            distances = linkage_matrix[:, 2]
            axes[1, 1].hist(distances, bins=20, alpha=0.7, edgecolor='black')
            axes[1, 1].set_title('聚类距离分布')
            axes[1, 1].set_xlabel('距离')
            axes[1, 1].set_ylabel('频次')

        else:
            # High-dimensional data: project to 2-D with PCA for display only.
            pca = PCA(n_components=2)
            data_pca = pca.fit_transform(data_scaled)

            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Cluster assignments in PCA space.
            scatter = axes[0, 0].scatter(data_pca[:, 0], data_pca[:, 1], c=labels, cmap='viridis', alpha=0.7)
            axes[0, 0].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%} 方差)')
            axes[0, 0].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%} 方差)')
            axes[0, 0].set_title(f'层次聚类结果 (PCA降维, k={n_clusters}, {linkage})')
            plt.colorbar(scatter, ax=axes[0, 0])

            # Dendrogram, truncated to keep it readable for large inputs.
            dendrogram(linkage_matrix, ax=axes[0, 1], truncate_mode='level', p=10)
            axes[0, 1].set_title('聚类树状图')
            axes[0, 1].set_xlabel('样本索引')
            axes[0, 1].set_ylabel('距离')

            # Cluster size distribution.
            unique, counts = np.unique(labels, return_counts=True)
            axes[1, 0].bar(unique, counts, alpha=0.7)
            axes[1, 0].set_title('各聚类大小分布')
            axes[1, 0].set_xlabel('聚类标签')
            axes[1, 0].set_ylabel('样本数量')

            # Variance explained by each principal component.
            axes[1, 1].bar(range(len(pca.explained_variance_ratio_)), pca.explained_variance_ratio_, alpha=0.7)
            axes[1, 1].set_title('PCA解释方差比')
            axes[1, 1].set_xlabel('主成分')
            axes[1, 1].set_ylabel('解释方差比')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        return {
            "labels": labels.tolist(),
            "n_clusters": n_clusters,
            "linkage_method": linkage,
            "silhouette_score": float(silhouette_avg),
            "calinski_harabasz_score": float(calinski_harabasz),
            "davies_bouldin_score": float(davies_bouldin),
            "linkage_matrix": linkage_matrix.tolist(),
            "cluster_sizes": dict(zip(unique.tolist(), counts.tolist())),
            "visualization": plot_base64,
            "evaluation_summary": {
                "silhouette_interpretation": f"轮廓系数: {silhouette_avg:.3f} (范围[-1,1], 越接近1越好)",
                "calinski_harabasz_interpretation": f"CH指数: {calinski_harabasz:.3f} (越大越好，表示类间分离度高且类内紧密度高)",
                "davies_bouldin_interpretation": f"DB指数: {davies_bouldin:.3f} (越小越好，表示类内距离小且类间距离大)",
                "clustering_quality": "优秀" if silhouette_avg > 0.7 else "良好" if silhouette_avg > 0.5 else "一般" if silhouette_avg > 0.25 else "较差"
            }
        }
        
    except Exception as e:
        return {"error": f"层次聚类错误: {str(e)}"}

async def main():
    """Run the MCP server over the stdio transport.

    The previous implementation referenced names that are never defined in
    this module (`server`, `InitializationOptions`, `NotificationOptions`)
    and treated the FastMCP instance `mcp` as if it were the low-level
    `mcp.server.stdio` module, so it raised NameError at call time.
    FastMCP wraps the low-level Server and exposes `run_stdio_async()`,
    which sets up the stdio read/write streams, the initialization options
    and capabilities internally.
    """
    await mcp.run_stdio_async()
def _draw_confusion_matrix(ax, y_true, y_pred, unique_labels):
    """Render an annotated confusion matrix onto *ax*.

    Shared by the 2-D and high-dimensional branches of svm_classification,
    which previously carried two identical copy-pasted versions of this code.
    """
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_true, y_pred)
    ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.set_title('混淆矩阵')
    tick_marks = np.arange(len(unique_labels))
    ax.set_xticks(tick_marks)
    ax.set_yticks(tick_marks)
    ax.set_xticklabels(unique_labels)
    ax.set_yticklabels(unique_labels)
    ax.set_xlabel('预测标签')
    ax.set_ylabel('真实标签')
    # Annotate each cell; flip text color past half the max count for contrast.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], 'd'),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")


def _per_class_metrics_text(unique_labels, class_report):
    """Build the per-class precision/recall/F1 text block.

    *class_report* is the dict form of sklearn's classification_report;
    its per-class keys are the string form of the labels seen in y.
    """
    text = ""
    for label in unique_labels:
        if str(label) in class_report:
            precision = class_report[str(label)]['precision']
            recall = class_report[str(label)]['recall']
            f1 = class_report[str(label)]['f1-score']
            text += f"类别 {label}:\n"
            text += f"  精确率: {precision:.4f}\n"
            text += f"  召回率: {recall:.4f}\n"
            text += f"  F1分数: {f1:.4f}\n\n"
    return text


@mcp.tool()
def svm_classification(X_data: List[List[float]], y_data: List[int], 
                      kernel: str = "rbf", C: float = 1.0, 
                      test_size: float = 0.3, random_state: int = 42) -> Dict[str, Any]:
    """支持向量机分类

    Args:
        X_data: 特征数据（每行一个样本）
        y_data: 标签数据
        kernel: 核函数类型 ("linear", "poly", "rbf", "sigmoid")
        C: 正则化参数
        test_size: 测试集比例
        random_state: 随机种子

    Returns:
        包含分类结果和可视化的字典
        (accuracy, classification report, support-vector count, test
        predictions/probabilities and a base64 plot; {"error": ...} on failure)
    """
    try:
        X = np.array(X_data)
        y = np.array(y_data)

        # Stratified split keeps the class proportions in train and test sets.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )

        # probability=True enables predict_proba (extra internal CV cost).
        svm_model = SVC(kernel=kernel, C=C, random_state=random_state, probability=True)
        svm_model.fit(X_train, y_train)

        y_pred = svm_model.predict(X_test)
        y_prob = svm_model.predict_proba(X_test)

        accuracy = accuracy_score(y_test, y_pred)

        # Dict form so per-class metrics can be looked up by label string.
        class_report = classification_report(y_test, y_pred, output_dict=True)

        unique_labels = np.unique(y)
        colors = plt.cm.Set1(np.linspace(0, 1, len(unique_labels)))

        if X.shape[1] == 2:
            # 2-D data: plot the raw points and the actual decision boundary.
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            # Raw data scatter, one color per class.
            for i, label in enumerate(unique_labels):
                mask = y == label
                axes[0, 0].scatter(X[mask, 0], X[mask, 1], c=[colors[i]], 
                                 label=f'类别 {label}', alpha=0.7)
            axes[0, 0].set_title('原始数据分布')
            axes[0, 0].set_xlabel('特征1')
            axes[0, 0].set_ylabel('特征2')
            axes[0, 0].legend()
            axes[0, 0].grid(True)

            # Decision boundary: predict on a dense mesh covering the data.
            h = 0.02
            x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
            y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
            xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                               np.arange(y_min, y_max, h))

            Z = svm_model.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)

            axes[0, 1].contourf(xx, yy, Z, alpha=0.3, cmap=plt.cm.Set1)
            for i, label in enumerate(unique_labels):
                mask_train = y_train == label
                mask_test = y_test == label
                axes[0, 1].scatter(X_train[mask_train, 0], X_train[mask_train, 1], 
                                 c=[colors[i]], marker='o', label=f'训练-类别{label}', alpha=0.7)
                axes[0, 1].scatter(X_test[mask_test, 0], X_test[mask_test, 1], 
                                 c=[colors[i]], marker='^', label=f'测试-类别{label}', alpha=0.9)

            # Highlight support vectors with hollow black circles.
            if hasattr(svm_model, 'support_vectors_'):
                axes[0, 1].scatter(svm_model.support_vectors_[:, 0], 
                                 svm_model.support_vectors_[:, 1], 
                                 s=100, facecolors='none', edgecolors='k', 
                                 linewidth=2, label='支持向量')

            axes[0, 1].set_title(f'SVM决策边界 (核函数: {kernel})')
            axes[0, 1].set_xlabel('特征1')
            axes[0, 1].set_ylabel('特征2')
            axes[0, 1].legend()

            _draw_confusion_matrix(axes[1, 0], y_test, y_pred, unique_labels)

            # Performance summary panel.
            metrics_text = f"准确率: {accuracy:.4f}\n"
            metrics_text += f"核函数: {kernel}\n"
            metrics_text += f"C参数: {C}\n"
            metrics_text += f"支持向量数: {len(svm_model.support_vectors_)}\n\n"
            metrics_text += _per_class_metrics_text(unique_labels, class_report)

            axes[1, 1].text(0.1, 0.9, metrics_text, transform=axes[1, 1].transAxes, 
                          fontsize=10, verticalalignment='top',
                          bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.8))
            axes[1, 1].set_title('分类性能指标')
            axes[1, 1].axis('off')

        else:
            # High-dimensional data: project to 2-D with PCA for display only
            # (the model itself is trained in the original feature space).
            from sklearn.decomposition import PCA

            fig, axes = plt.subplots(2, 2, figsize=(15, 12))

            pca = PCA(n_components=2)
            X_pca = pca.fit_transform(X)
            X_train_pca = pca.transform(X_train)
            X_test_pca = pca.transform(X_test)

            for i, label in enumerate(unique_labels):
                mask = y == label
                axes[0, 0].scatter(X_pca[mask, 0], X_pca[mask, 1], c=[colors[i]], 
                                 label=f'类别 {label}', alpha=0.7)
            axes[0, 0].set_title(f'PCA降维可视化 (解释方差: {pca.explained_variance_ratio_.sum():.3f})')
            axes[0, 0].set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.3f})')
            axes[0, 0].set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.3f})')
            axes[0, 0].legend()
            axes[0, 0].grid(True)

            # Feature importances only exist for the linear kernel (coef_).
            if kernel == 'linear' and hasattr(svm_model, 'coef_'):
                feature_importance = np.abs(svm_model.coef_[0])
                feature_names = [f'特征{i+1}' for i in range(len(feature_importance))]

                axes[0, 1].bar(range(len(feature_importance)), feature_importance)
                axes[0, 1].set_title('特征重要性 (线性核)')
                axes[0, 1].set_xlabel('特征')
                axes[0, 1].set_ylabel('重要性')
                axes[0, 1].set_xticks(range(len(feature_importance)))
                axes[0, 1].set_xticklabels(feature_names, rotation=45)
            else:
                axes[0, 1].text(0.5, 0.5, f'特征重要性\n(仅适用于线性核)\n当前核函数: {kernel}', 
                              ha='center', va='center', transform=axes[0, 1].transAxes)
                axes[0, 1].set_title('特征重要性')

            _draw_confusion_matrix(axes[1, 0], y_test, y_pred, unique_labels)

            # Performance summary panel (adds the feature-dimension line).
            metrics_text = f"准确率: {accuracy:.4f}\n"
            metrics_text += f"特征维度: {X.shape[1]}\n"
            metrics_text += f"核函数: {kernel}\n"
            metrics_text += f"C参数: {C}\n"
            metrics_text += f"支持向量数: {len(svm_model.support_vectors_)}\n\n"
            metrics_text += _per_class_metrics_text(unique_labels, class_report)

            axes[1, 1].text(0.1, 0.9, metrics_text, transform=axes[1, 1].transAxes, 
                          fontsize=10, verticalalignment='top',
                          bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.8))
            axes[1, 1].set_title('分类性能指标')
            axes[1, 1].axis('off')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        return {
            "accuracy": float(accuracy),
            "classification_report": class_report,
            "support_vectors_count": len(svm_model.support_vectors_),
            "kernel": kernel,
            "C_parameter": C,
            "test_predictions": y_pred.tolist(),
            "test_probabilities": y_prob.tolist(),
            "feature_dimensions": X.shape[1],
            "visualization": plot_base64
        }

    except Exception as e:
        return {"error": f"SVM分类错误: {str(e)}"}

@mcp.tool()
def decision_tree_classification(X_data: List[List[float]], y_data: List[int], 
                               max_depth: Optional[int] = None, min_samples_split: int = 2,
                               min_samples_leaf: int = 1, test_size: float = 0.3, 
                               random_state: int = 42, X_predict: Optional[List[List[float]]] = None) -> Dict[str, Any]:
    """增强的决策树分类

    Args:
        X_data: 特征数据（每行一个样本）
        y_data: 标签数据 (整数或字符串；字符串标签会先经过 LabelEncoder 编码)
        max_depth: 最大深度
        min_samples_split: 分割所需的最小样本数
        min_samples_leaf: 叶节点最小样本数
        test_size: 测试集比例 (小数据集会被自动忽略，改用全量训练)
        random_state: 随机种子
        X_predict: 待预测的新数据（可选）

    Returns:
        包含分类结果和决策树可视化的字典; 出错时返回 {"error": ...}
    """
    try:
        X = np.array(X_data, dtype=float)
        y = np.array(y_data)

        # Encode string labels to ints; the model, classification_report and
        # confusion matrix all operate on the encoded values.
        from sklearn.preprocessing import LabelEncoder
        label_encoder = LabelEncoder()
        string_labels = y.dtype == 'object' or isinstance(y[0], str)
        if string_labels:
            y_encoded = label_encoder.fit_transform(y)
            class_names = label_encoder.classes_.tolist()
        else:
            y_encoded = y.astype(int)
            class_names = [str(i) for i in sorted(set(y_encoded))]

        # Tiny or degenerate datasets cannot be split meaningfully; fall back
        # to training (and "testing") on the full data.
        n_samples = len(X)
        unique_classes = len(set(y_encoded))

        if n_samples < 10 or unique_classes < 2:
            X_train, X_test = X, X
            y_train, y_test = y_encoded, y_encoded
            test_size_used = 0.0
        else:
            # Stratified splitting needs at least 2 samples per class.
            min_class_count = min([sum(y_encoded == cls) for cls in set(y_encoded)])
            if min_class_count < 2:
                X_train, X_test = X, X
                y_train, y_test = y_encoded, y_encoded
                test_size_used = 0.0
            else:
                try:
                    X_train, X_test, y_train, y_test = train_test_split(
                        X, y_encoded, test_size=test_size, random_state=random_state, stratify=y_encoded
                    )
                    test_size_used = test_size
                except ValueError:
                    # Stratification failed; fall back to a plain split.
                    X_train, X_test, y_train, y_test = train_test_split(
                        X, y_encoded, test_size=test_size, random_state=random_state
                    )
                    test_size_used = test_size

        dt_model = DecisionTreeClassifier(
            max_depth=max_depth, 
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            random_state=random_state
        )
        dt_model.fit(X_train, y_train)

        y_pred = dt_model.predict(X_test)
        y_prob = dt_model.predict_proba(X_test)

        # Optional inference on new samples, decoded back to original labels.
        new_predictions = None
        new_probabilities = None
        new_predictions_decoded = None

        if X_predict is not None:
            X_predict_array = np.array(X_predict, dtype=float)
            new_predictions = dt_model.predict(X_predict_array)
            new_probabilities = dt_model.predict_proba(X_predict_array)

            if string_labels:
                new_predictions_decoded = label_encoder.inverse_transform(new_predictions).tolist()
            else:
                new_predictions_decoded = new_predictions.tolist()

        # With no held-out data, report training accuracy instead.
        if test_size_used > 0:
            accuracy = accuracy_score(y_test, y_pred)
        else:
            accuracy = accuracy_score(y_train, dt_model.predict(X_train))

        # Dict form; per-class keys are the *encoded* labels as strings.
        class_report = classification_report(y_test, y_pred, output_dict=True)

        feature_importance = dt_model.feature_importances_

        fig, axes = plt.subplots(2, 2, figsize=(20, 16))

        # Tree structure plot. np.unique(y) is sorted, matching LabelEncoder's
        # class order, so the class names line up with the encoded classes.
        plot_tree(dt_model, ax=axes[0, 0], filled=True, feature_names=[f'特征{i+1}' for i in range(X.shape[1])],
                 class_names=[f'类别{i}' for i in np.unique(y)], rounded=True, fontsize=8)
        axes[0, 0].set_title('决策树结构')

        # Feature importances, sorted descending.
        feature_names = [f'特征{i+1}' for i in range(len(feature_importance))]
        sorted_idx = np.argsort(feature_importance)[::-1]

        axes[0, 1].bar(range(len(feature_importance)), feature_importance[sorted_idx])
        axes[0, 1].set_title('特征重要性')
        axes[0, 1].set_xlabel('特征')
        axes[0, 1].set_ylabel('重要性')
        axes[0, 1].set_xticks(range(len(feature_importance)))
        axes[0, 1].set_xticklabels([feature_names[i] for i in sorted_idx], rotation=45)

        # Confusion matrix over encoded labels; tick labels show the original
        # label values (same sorted order as the encoding).
        from sklearn.metrics import confusion_matrix
        cm = confusion_matrix(y_test, y_pred)
        unique_labels = np.unique(y)
        im = axes[1, 0].imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        axes[1, 0].set_title('混淆矩阵')
        tick_marks = np.arange(len(unique_labels))
        axes[1, 0].set_xticks(tick_marks)
        axes[1, 0].set_yticks(tick_marks)
        axes[1, 0].set_xticklabels(unique_labels)
        axes[1, 0].set_yticklabels(unique_labels)
        axes[1, 0].set_xlabel('预测标签')
        axes[1, 0].set_ylabel('真实标签')

        # Annotate counts; flip text color past half the max for contrast.
        thresh = cm.max() / 2.
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                axes[1, 0].text(j, i, format(cm[i, j], 'd'),
                               ha="center", va="center",
                               color="white" if cm[i, j] > thresh else "black")

        # Performance and tree-shape summary panel.
        metrics_text = f"准确率: {accuracy:.4f}\n"
        metrics_text += f"树深度: {dt_model.get_depth()}\n"
        metrics_text += f"叶节点数: {dt_model.get_n_leaves()}\n"
        metrics_text += f"特征维度: {X.shape[1]}\n"
        metrics_text += f"最大深度限制: {max_depth if max_depth else '无限制'}\n"
        metrics_text += f"最小分割样本: {min_samples_split}\n"
        metrics_text += f"最小叶节点样本: {min_samples_leaf}\n\n"

        # BUG FIX: class_report is keyed by the encoded labels ('0', '1', ...).
        # For string labels, str(label) used the original text and the lookup
        # silently found nothing, dropping all per-class metrics. Translate
        # each original label to its encoded key first.
        for label in unique_labels:
            key = str(label_encoder.transform([label])[0]) if string_labels else str(label)
            if key in class_report:
                precision = class_report[key]['precision']
                recall = class_report[key]['recall']
                f1 = class_report[key]['f1-score']
                metrics_text += f"类别 {label}:\n"
                metrics_text += f"  精确率: {precision:.4f}\n"
                metrics_text += f"  召回率: {recall:.4f}\n"
                metrics_text += f"  F1分数: {f1:.4f}\n\n"

        # Top-5 most important features.
        top_features = sorted_idx[:min(5, len(feature_importance))]
        metrics_text += "最重要特征:\n"
        for i, feat_idx in enumerate(top_features):
            metrics_text += f"  {feature_names[feat_idx]}: {feature_importance[feat_idx]:.4f}\n"

        axes[1, 1].text(0.05, 0.95, metrics_text, transform=axes[1, 1].transAxes, 
                       fontsize=10, verticalalignment='top',
                       bbox=dict(boxstyle='round', facecolor='lightgreen', alpha=0.8))
        axes[1, 1].set_title('分类性能指标')
        axes[1, 1].axis('off')

        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)

        return {
            "accuracy": float(accuracy),
            "classification_report": class_report,
            "tree_depth": dt_model.get_depth(),
            "n_leaves": dt_model.get_n_leaves(),
            "feature_importances": feature_importance.tolist(),
            "feature_names": feature_names,
            "class_names": class_names,
            "test_predictions": y_pred.tolist(),
            "test_probabilities": y_prob.tolist(),
            "test_size_used": test_size_used,
            "training_samples": len(X_train),
            "test_samples": len(X_test),
            "new_predictions": new_predictions_decoded,
            "new_predictions_raw": new_predictions.tolist() if new_predictions is not None else None,
            "new_probabilities": new_probabilities.tolist() if new_probabilities is not None else None,
            "max_depth": max_depth,
            "min_samples_split": min_samples_split,
            "min_samples_leaf": min_samples_leaf,
            "visualization": plot_base64
        }

    except Exception as e:
        return {"error": f"决策树分类错误: {str(e)}"}

@mcp.tool()
def train_classification_model(X_train: List[List[float]], y_train: List[Union[int, str]], 
                              algorithm: str = 'random_forest', 
                              test_size: float = 0.2, random_state: int = 42,
                              enable_feature_selection: bool = False,
                              enable_hyperparameter_tuning: bool = False,
                              enable_ensemble: bool = False,
                              cross_validation: bool = True,
                              enable_model_interpretation: bool = True,
                              enable_performance_monitoring: bool = True,
                              auto_model_selection: bool = False,
                              **hyperparams) -> Dict[str, Any]:
    """
    增强的机器学习分类模型训练
    
    Args:
        X_train: 训练特征数据
        y_train: 训练标签数据
        algorithm: 分类算法 ('logistic_regression', 'decision_tree', 'random_forest', 'svm', 'knn', 'naive_bayes', 'xgboost', 'lightgbm', 'neural_network', 'ensemble')
        test_size: 测试集比例
        random_state: 随机种子
        enable_feature_selection: 是否启用特征选择
        enable_hyperparameter_tuning: 是否启用超参数调优
        enable_ensemble: 是否启用集成学习
        cross_validation: 是否使用交叉验证
        enable_model_interpretation: 是否启用模型解释性分析
        enable_performance_monitoring: 是否启用性能监控和诊断
        auto_model_selection: 是否启用自动模型选择
        **hyperparams: 模型超参数
    
    Returns:
        训练结果和模型评估
    """
    try:
        from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold
        from sklearn.preprocessing import StandardScaler, LabelEncoder, RobustScaler, MinMaxScaler
        from sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif, RFE
        from sklearn.linear_model import LogisticRegression
        from sklearn.tree import DecisionTreeClassifier
        from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier, AdaBoostClassifier
        from sklearn.svm import SVC
        from sklearn.neighbors import KNeighborsClassifier
        from sklearn.naive_bayes import GaussianNB
        from sklearn.neural_network import MLPClassifier
        from sklearn.metrics import (
            accuracy_score, precision_score, recall_score, f1_score,
            classification_report, confusion_matrix, roc_auc_score, roc_curve,
            matthews_corrcoef, cohen_kappa_score, balanced_accuracy_score
        )
        
        # 尝试导入XGBoost和LightGBM
        try:
            import xgboost as xgb
            XGBOOST_AVAILABLE = True
        except ImportError:
            XGBOOST_AVAILABLE = False
            
        try:
            import lightgbm as lgb
            LIGHTGBM_AVAILABLE = True
        except ImportError:
            LIGHTGBM_AVAILABLE = False
        
        # 数据预处理
        X = np.array(X_train)
        y = np.array(y_train)
        
        # 标签编码（如果是字符串标签）
        label_encoder = LabelEncoder()
        if y.dtype == 'object' or isinstance(y[0], str):
            y_encoded = label_encoder.fit_transform(y)
            class_names = label_encoder.classes_.tolist()
        else:
            y_encoded = y.astype(int)
            class_names = list(set(y_encoded))
        
        # 自动模型选择
        auto_selection_results = None
        if auto_model_selection:
            print("正在进行自动模型选择...")
            candidate_algorithms = ['logistic_regression', 'random_forest', 'svm', 'xgboost', 'gradient_boosting']
            best_score = 0
            best_algorithm = algorithm
            
            # 快速评估各算法性能
            for candidate in candidate_algorithms:
                try:
                    # 使用小样本快速评估
                    sample_size = min(1000, len(X))
                    if len(X) > sample_size:
                        sample_indices = np.random.choice(len(X), sample_size, replace=False)
                        X_sample = X[sample_indices]
                        y_sample = y_encoded[sample_indices]
                    else:
                        X_sample = X
                        y_sample = y_encoded
                    
                    # 简单的交叉验证评估
                    from sklearn.model_selection import cross_val_score
                    if candidate == 'logistic_regression':
                        temp_model = LogisticRegression(random_state=random_state, max_iter=500)
                    elif candidate == 'random_forest':
                        temp_model = RandomForestClassifier(n_estimators=50, random_state=random_state)
                    elif candidate == 'svm':
                        temp_model = SVC(random_state=random_state, probability=True)
                    elif candidate == 'gradient_boosting':
                        temp_model = GradientBoostingClassifier(n_estimators=50, random_state=random_state)
                    elif candidate == 'xgboost' and XGBOOST_AVAILABLE:
                        temp_model = xgb.XGBClassifier(n_estimators=50, random_state=random_state, eval_metric='logloss')
                    else:
                        continue
                    
                    scores = cross_val_score(temp_model, X_sample, y_sample, cv=3, scoring='f1_weighted')
                    avg_score = scores.mean()
                    
                    if avg_score > best_score:
                        best_score = avg_score
                        best_algorithm = candidate
                        
                except Exception as e:
                    print(f"算法 {candidate} 评估失败: {str(e)}")
                    continue
            
            algorithm = best_algorithm
            auto_selection_results = {
                'selected_algorithm': best_algorithm,
                'selection_score': float(best_score),
                'evaluated_algorithms': candidate_algorithms
            }
            print(f"自动选择算法: {best_algorithm} (评分: {best_score:.3f})")
        
        # 数据分割
        X_train_split, X_test_split, y_train_split, y_test_split = train_test_split(
            X, y_encoded, test_size=test_size, random_state=random_state, stratify=y_encoded
        )
        
        # 特征工程和选择
        feature_selection_info = None
        if enable_feature_selection and X.shape[1] > 5:  # 只有特征数量足够时才进行特征选择
            # 使用多种特征选择方法
            selector_methods = {
                'f_classif': SelectKBest(f_classif, k=min(10, X.shape[1]//2)),
                'mutual_info': SelectKBest(mutual_info_classif, k=min(10, X.shape[1]//2))
            }
            
            best_score = 0
            best_selector = None
            best_method = None
            
            for method_name, selector in selector_methods.items():
                X_selected = selector.fit_transform(X_train_split, y_train_split)
                # 使用简单的逻辑回归评估特征选择效果
                temp_model = LogisticRegression(random_state=random_state, max_iter=1000)
                scores = cross_val_score(temp_model, X_selected, y_train_split, cv=3, scoring='f1_weighted')
                avg_score = scores.mean()
                
                if avg_score > best_score:
                    best_score = avg_score
                    best_selector = selector
                    best_method = method_name
            
            if best_selector is not None:
                X_train_split = best_selector.transform(X_train_split)
                X_test_split = best_selector.transform(X_test_split)
                selected_features = best_selector.get_support(indices=True)
                feature_selection_info = {
                    'method': best_method,
                    'selected_features': selected_features.tolist(),
                    'n_features_selected': len(selected_features),
                    'selection_score': float(best_score)
                }
        
        # 智能特征缩放选择
        scaler_type = hyperparams.get('scaler', 'auto')
        if scaler_type == 'auto':
            # 根据数据分布自动选择缩放器
            data_range = np.max(X_train_split) - np.min(X_train_split)
            data_std = np.std(X_train_split)
            
            if data_range > 1000 or data_std > 100:  # 数据范围很大
                scaler = RobustScaler()  # 对异常值更鲁棒
            elif np.any(X_train_split < 0):  # 有负值
                scaler = StandardScaler()  # 标准化
            else:
                scaler = MinMaxScaler()  # 归一化到[0,1]
        elif scaler_type == 'standard':
            scaler = StandardScaler()
        elif scaler_type == 'robust':
            scaler = RobustScaler()
        elif scaler_type == 'minmax':
            scaler = MinMaxScaler()
        else:
            scaler = StandardScaler()  # 默认
        
        X_train_scaled = scaler.fit_transform(X_train_split)
        X_test_scaled = scaler.transform(X_test_split)
        
        # 选择和配置模型
        def get_model_and_data(algorithm, hyperparams, random_state):
            """根据算法选择模型和数据"""
            if algorithm == 'logistic_regression':
                model = LogisticRegression(
                    random_state=random_state,
                    max_iter=hyperparams.get('max_iter', 1000),
                    C=hyperparams.get('C', 1.0),
                    solver=hyperparams.get('solver', 'lbfgs'),
                    penalty=hyperparams.get('penalty', 'l2')
                )
                return model, X_train_scaled, X_test_scaled
                
            elif algorithm == 'decision_tree':
                model = DecisionTreeClassifier(
                    random_state=random_state,
                    max_depth=hyperparams.get('max_depth', None),
                    min_samples_split=hyperparams.get('min_samples_split', 2),
                    min_samples_leaf=hyperparams.get('min_samples_leaf', 1),
                    criterion=hyperparams.get('criterion', 'gini'),
                    max_features=hyperparams.get('max_features', None)
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'random_forest':
                model = RandomForestClassifier(
                    random_state=random_state,
                    n_estimators=hyperparams.get('n_estimators', 100),
                    max_depth=hyperparams.get('max_depth', None),
                    min_samples_split=hyperparams.get('min_samples_split', 2),
                    min_samples_leaf=hyperparams.get('min_samples_leaf', 1),
                    max_features=hyperparams.get('max_features', 'sqrt'),
                    bootstrap=hyperparams.get('bootstrap', True)
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'gradient_boosting':
                model = GradientBoostingClassifier(
                    random_state=random_state,
                    n_estimators=hyperparams.get('n_estimators', 100),
                    learning_rate=hyperparams.get('learning_rate', 0.1),
                    max_depth=hyperparams.get('max_depth', 3),
                    subsample=hyperparams.get('subsample', 1.0)
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'xgboost':
                if not XGBOOST_AVAILABLE:
                    raise ImportError("XGBoost未安装，请使用: pip install xgboost")
                model = xgb.XGBClassifier(
                    random_state=random_state,
                    n_estimators=hyperparams.get('n_estimators', 100),
                    learning_rate=hyperparams.get('learning_rate', 0.1),
                    max_depth=hyperparams.get('max_depth', 6),
                    subsample=hyperparams.get('subsample', 1.0),
                    colsample_bytree=hyperparams.get('colsample_bytree', 1.0),
                    eval_metric='logloss'
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'lightgbm':
                if not LIGHTGBM_AVAILABLE:
                    raise ImportError("LightGBM未安装，请使用: pip install lightgbm")
                model = lgb.LGBMClassifier(
                    random_state=random_state,
                    n_estimators=hyperparams.get('n_estimators', 100),
                    learning_rate=hyperparams.get('learning_rate', 0.1),
                    max_depth=hyperparams.get('max_depth', -1),
                    num_leaves=hyperparams.get('num_leaves', 31),
                    subsample=hyperparams.get('subsample', 1.0),
                    colsample_bytree=hyperparams.get('colsample_bytree', 1.0),
                    verbose=-1
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'svm':
                model = SVC(
                    random_state=random_state,
                    C=hyperparams.get('C', 1.0),
                    kernel=hyperparams.get('kernel', 'rbf'),
                    gamma=hyperparams.get('gamma', 'scale'),
                    probability=True
                )
                return model, X_train_scaled, X_test_scaled
                
            elif algorithm == 'knn':
                model = KNeighborsClassifier(
                    n_neighbors=hyperparams.get('n_neighbors', 5),
                    weights=hyperparams.get('weights', 'uniform'),
                    metric=hyperparams.get('metric', 'minkowski'),
                    algorithm=hyperparams.get('algorithm', 'auto')
                )
                return model, X_train_scaled, X_test_scaled
                
            elif algorithm == 'naive_bayes':
                model = GaussianNB(
                    var_smoothing=hyperparams.get('var_smoothing', 1e-9)
                )
                return model, X_train_scaled, X_test_scaled
                
            elif algorithm == 'neural_network':
                model = MLPClassifier(
                    random_state=random_state,
                    hidden_layer_sizes=hyperparams.get('hidden_layer_sizes', (100,)),
                    activation=hyperparams.get('activation', 'relu'),
                    solver=hyperparams.get('solver', 'adam'),
                    alpha=hyperparams.get('alpha', 0.0001),
                    learning_rate=hyperparams.get('learning_rate', 'constant'),
                    max_iter=hyperparams.get('max_iter', 500),
                    early_stopping=hyperparams.get('early_stopping', True),
                    validation_fraction=hyperparams.get('validation_fraction', 0.1)
                )
                return model, X_train_scaled, X_test_scaled
                
            elif algorithm == 'adaboost':
                base_estimator = hyperparams.get('base_estimator', DecisionTreeClassifier(max_depth=1))
                model = AdaBoostClassifier(
                    base_estimator=base_estimator,
                    n_estimators=hyperparams.get('n_estimators', 50),
                    learning_rate=hyperparams.get('learning_rate', 1.0),
                    random_state=random_state
                )
                return model, X_train_split, X_test_split
                
            elif algorithm == 'ensemble':
                # 创建集成模型
                estimators = [
                    ('rf', RandomForestClassifier(n_estimators=50, random_state=random_state)),
                    ('svm', SVC(probability=True, random_state=random_state)),
                    ('lr', LogisticRegression(random_state=random_state, max_iter=1000))
                ]
                
                if XGBOOST_AVAILABLE:
                    estimators.append(('xgb', xgb.XGBClassifier(random_state=random_state, eval_metric='logloss')))
                
                model = VotingClassifier(
                    estimators=estimators,
                    voting=hyperparams.get('voting', 'soft')
                )
                return model, X_train_scaled, X_test_scaled
                
            else:
                raise ValueError(f"不支持的算法: {algorithm}")
        
        # 获取模型和数据
        try:
            model, X_train_final, X_test_final = get_model_and_data(algorithm, hyperparams, random_state)
        except (ImportError, ValueError) as e:
            return {"error": str(e)}
        
        # 超参数调优（如果启用）
        tuning_info = None
        if enable_hyperparameter_tuning:
            print("正在进行超参数调优...")
            
            # 定义超参数网格
            param_grids = {
                'logistic_regression': {
                    'C': [0.1, 1, 10],
                    'solver': ['lbfgs', 'liblinear'],
                    'penalty': ['l1', 'l2']
                },
                'random_forest': {
                    'n_estimators': [50, 100, 200],
                    'max_depth': [None, 10, 20],
                    'min_samples_split': [2, 5, 10]
                },
                'svm': {
                    'C': [0.1, 1, 10],
                    'kernel': ['rbf', 'linear'],
                    'gamma': ['scale', 'auto']
                },
                'xgboost': {
                    'n_estimators': [50, 100, 200],
                    'learning_rate': [0.01, 0.1, 0.2],
                    'max_depth': [3, 6, 9]
                } if XGBOOST_AVAILABLE else {},
                'neural_network': {
                    'hidden_layer_sizes': [(50,), (100,), (100, 50)],
                    'alpha': [0.0001, 0.001, 0.01],
                    'learning_rate': ['constant', 'adaptive']
                }
            }
            
            if algorithm in param_grids and param_grids[algorithm]:
                # 创建基础模型用于调优
                base_model, _, _ = get_model_and_data(algorithm, {}, random_state)
                
                # 网格搜索
                grid_search = GridSearchCV(
                    base_model, 
                    param_grids[algorithm],
                    cv=3,
                    scoring='f1_weighted',
                    n_jobs=-1,
                    verbose=0
                )
                
                grid_search.fit(X_train_final, y_train_split)
                
                # 使用最佳参数更新模型
                best_params = grid_search.best_params_
                hyperparams.update(best_params)
                model, X_train_final, X_test_final = get_model_and_data(algorithm, hyperparams, random_state)
                
                tuning_info = {
                    'best_params': best_params,
                    'best_score': float(grid_search.best_score_),
                    'cv_results': {
                        'mean_scores': grid_search.cv_results_['mean_test_score'].tolist()[:5],  # 只保存前5个结果
                        'std_scores': grid_search.cv_results_['std_test_score'].tolist()[:5]
                    }
                }
        
        # 交叉验证评估（如果启用）
        cv_scores = None
        if cross_validation:
            cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
            cv_scores_array = cross_val_score(model, X_train_final, y_train_split, cv=cv, scoring='f1_weighted')
            cv_scores = {
                'scores': cv_scores_array.tolist(),
                'mean': float(cv_scores_array.mean()),
                'std': float(cv_scores_array.std()),
                'confidence_interval': [
                    float(cv_scores_array.mean() - 2 * cv_scores_array.std()),
                    float(cv_scores_array.mean() + 2 * cv_scores_array.std())
                ]
            }
        
        # 训练最终模型
        model.fit(X_train_final, y_train_split)
        
        # 预测
        y_pred = model.predict(X_test_final)
        y_pred_proba = None
        
        # 获取预测概率（如果支持）
        if hasattr(model, 'predict_proba'):
            y_pred_proba = model.predict_proba(X_test_final)
        
        # 计算增强的评估指标
        accuracy = accuracy_score(y_test_split, y_pred)
        balanced_accuracy = balanced_accuracy_score(y_test_split, y_pred)
        
        # 多分类情况下的精确率、召回率、F1分数
        precision = precision_score(y_test_split, y_pred, average='weighted', zero_division=0)
        recall = recall_score(y_test_split, y_pred, average='weighted', zero_division=0)
        f1 = f1_score(y_test_split, y_pred, average='weighted', zero_division=0)
        
        # 额外的评估指标
        mcc = matthews_corrcoef(y_test_split, y_pred)  # Matthews相关系数
        kappa = cohen_kappa_score(y_test_split, y_pred)  # Cohen's Kappa
        
        # 分类报告
        class_report = classification_report(y_test_split, y_pred, 
                                           target_names=[str(name) for name in class_names],
                                           output_dict=True, zero_division=0)
        
        # 混淆矩阵
        conf_matrix = confusion_matrix(y_test_split, y_pred)
        
        # ROC-AUC（仅适用于二分类或多分类）
        roc_auc = None
        if len(class_names) == 2 and y_pred_proba is not None:
            roc_auc = roc_auc_score(y_test_split, y_pred_proba[:, 1])
        elif len(class_names) > 2 and y_pred_proba is not None:
            roc_auc = roc_auc_score(y_test_split, y_pred_proba, multi_class='ovr', average='weighted')
        
        # 计算每个类别的详细指标
        per_class_metrics = {}
        for i, class_name in enumerate(class_names):
            if len(class_names) == 2:
                # 二分类情况
                class_precision = precision_score(y_test_split, y_pred, pos_label=i, zero_division=0)
                class_recall = recall_score(y_test_split, y_pred, pos_label=i, zero_division=0)
                class_f1 = f1_score(y_test_split, y_pred, pos_label=i, zero_division=0)
            else:
                # 多分类情况
                class_precision = precision_score(y_test_split, y_pred, labels=[i], average='micro', zero_division=0)
                class_recall = recall_score(y_test_split, y_pred, labels=[i], average='micro', zero_division=0)
                class_f1 = f1_score(y_test_split, y_pred, labels=[i], average='micro', zero_division=0)
            
            per_class_metrics[str(class_name)] = {
                'precision': float(class_precision),
                'recall': float(class_recall),
                'f1_score': float(class_f1),
                'support': int(np.sum(y_test_split == i))
            }
        
        # 特征重要性（如果支持）
        feature_importance = None
        if hasattr(model, 'feature_importances_'):
            feature_importance = model.feature_importances_.tolist()
        elif hasattr(model, 'coef_') and len(model.coef_.shape) == 2:
            # 对于逻辑回归等线性模型
            feature_importance = np.abs(model.coef_[0]).tolist()
        
        # 性能监控和诊断
        performance_monitoring = None
        if enable_performance_monitoring:
            # 计算预测置信度分布
            prediction_confidence = []
            if y_pred_proba is not None:
                prediction_confidence = np.max(y_pred_proba, axis=1).tolist()
            
            # 错误分析
            misclassified_indices = np.where(y_test_split != y_pred)[0]
            error_analysis = {
                'total_errors': len(misclassified_indices),
                'error_rate': float(len(misclassified_indices) / len(y_test_split)),
                'error_distribution': {}
            }
            
            # 按类别统计错误
            for true_class in class_names:
                true_idx = np.where(y_test_split == class_names.index(true_class))[0]
                errors_in_class = len(np.intersect1d(true_idx, misclassified_indices))
                error_analysis['error_distribution'][str(true_class)] = {
                    'errors': errors_in_class,
                    'total_samples': len(true_idx),
                    'error_rate': float(errors_in_class / len(true_idx)) if len(true_idx) > 0 else 0.0
                }
            
            # 学习曲线分析（简化版）
            learning_curve_info = None
            if len(X_train_final) > 100:  # 只有足够数据时才分析
                train_sizes = [0.2, 0.4, 0.6, 0.8, 1.0]
                train_scores = []
                
                for size in train_sizes:
                    n_samples = int(len(X_train_final) * size)
                    if n_samples < 10:
                        continue
                    
                    # 训练子模型
                    temp_model, _, _ = get_model_and_data(algorithm, hyperparams, random_state)
                    temp_model.fit(X_train_final[:n_samples], y_train_split[:n_samples])
                    temp_pred = temp_model.predict(X_test_final)
                    temp_score = f1_score(y_test_split, temp_pred, average='weighted', zero_division=0)
                    train_scores.append(float(temp_score))
                
                learning_curve_info = {
                    'train_sizes': train_sizes[:len(train_scores)],
                    'scores': train_scores
                }
            
            performance_monitoring = {
                'prediction_confidence': {
                    'mean': float(np.mean(prediction_confidence)) if prediction_confidence else None,
                    'std': float(np.std(prediction_confidence)) if prediction_confidence else None,
                    'min': float(np.min(prediction_confidence)) if prediction_confidence else None,
                    'max': float(np.max(prediction_confidence)) if prediction_confidence else None
                },
                'error_analysis': error_analysis,
                'learning_curve': learning_curve_info,
                'model_complexity': {
                    'n_parameters': getattr(model, 'n_features_in_', X_train_final.shape[1]),
                    'training_samples': len(X_train_final),
                    'test_samples': len(X_test_final)
                }
            }
        
        # 模型解释性分析
        model_interpretation = None
        if enable_model_interpretation:
            interpretation_results = {}
            
            # SHAP值分析（简化版，仅对小数据集）
            if len(X_test_final) <= 100 and X_test_final.shape[1] <= 20:
                try:
                    # 计算特征贡献度（基于排列重要性的简化版本）
                    from sklearn.inspection import permutation_importance
                    
                    perm_importance = permutation_importance(
                        model, X_test_final, y_test_split, 
                        n_repeats=5, random_state=random_state, scoring='f1_weighted'
                    )
                    
                    interpretation_results['permutation_importance'] = {
                        'importances_mean': perm_importance.importances_mean.tolist(),
                        'importances_std': perm_importance.importances_std.tolist()
                    }
                except Exception as e:
                    interpretation_results['permutation_importance_error'] = str(e)
            
            # 决策边界分析（仅对2D数据）
            if X_test_final.shape[1] == 2 and len(class_names) <= 5:
                try:
                    # 创建网格点
                    h = 0.02
                    x_min, x_max = X_test_final[:, 0].min() - 1, X_test_final[:, 0].max() + 1
                    y_min, y_max = X_test_final[:, 1].min() - 1, X_test_final[:, 1].max() + 1
                    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                                        np.arange(y_min, y_max, h))
                    
                    # 预测网格点
                    grid_points = np.c_[xx.ravel(), yy.ravel()]
                    if len(grid_points) <= 10000:  # 限制计算量
                        Z = model.predict(grid_points)
                        interpretation_results['decision_boundary'] = {
                            'grid_shape': xx.shape,
                            'predictions': Z.tolist()
                        }
                except Exception as e:
                    interpretation_results['decision_boundary_error'] = str(e)
            
            # 预测解释（前5个样本）
            if y_pred_proba is not None and len(y_pred_proba) > 0:
                sample_explanations = []
                for i in range(min(5, len(y_pred_proba))):
                    explanation = {
                        'sample_index': i,
                        'true_label': int(y_test_split[i]),
                        'predicted_label': int(y_pred[i]),
                        'prediction_probabilities': y_pred_proba[i].tolist(),
                        'confidence': float(np.max(y_pred_proba[i])),
                        'is_correct': bool(y_test_split[i] == y_pred[i])
                    }
                    sample_explanations.append(explanation)
                
                interpretation_results['sample_explanations'] = sample_explanations
            
            model_interpretation = interpretation_results
        
        # 创建可视化
        n_classes = len(class_names)
        if n_classes <= 10:  # 只为类别数不太多的情况创建可视化
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))
            fig.suptitle(f'{algorithm.replace("_", " ").title()} 分类结果', fontsize=16)
            
            # 混淆矩阵热力图
            sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', 
                       xticklabels=class_names, yticklabels=class_names, ax=axes[0, 0])
            axes[0, 0].set_title('混淆矩阵')
            axes[0, 0].set_xlabel('预测标签')
            axes[0, 0].set_ylabel('真实标签')
            
            # 评估指标柱状图
            metrics = ['准确率', '精确率', '召回率', 'F1分数']
            values = [accuracy, precision, recall, f1]
            bars = axes[0, 1].bar(metrics, values, color=['#3498db', '#e74c3c', '#2ecc71', '#f39c12'])
            axes[0, 1].set_title('评估指标')
            axes[0, 1].set_ylabel('分数')
            axes[0, 1].set_ylim(0, 1)
            
            # 在柱状图上添加数值标签
            for bar, value in zip(bars, values):
                axes[0, 1].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01, 
                               f'{value:.3f}', ha='center', va='bottom')
            
            # 特征重要性（如果有）
            if feature_importance is not None:
                feature_indices = list(range(len(feature_importance)))
                axes[1, 0].barh(feature_indices, feature_importance, color='skyblue')
                axes[1, 0].set_title('特征重要性')
                axes[1, 0].set_xlabel('重要性分数')
                axes[1, 0].set_ylabel('特征索引')
            else:
                axes[1, 0].text(0.5, 0.5, '该算法不支持\n特征重要性分析', 
                               ha='center', va='center', transform=axes[1, 0].transAxes,
                               fontsize=12, bbox=dict(boxstyle="round,pad=0.3", facecolor="lightgray"))
                axes[1, 0].set_title('特征重要性')
                axes[1, 0].axis('off')
            
            # ROC曲线（仅二分类）
            if len(class_names) == 2 and y_pred_proba is not None:
                fpr, tpr, _ = roc_curve(y_test_split, y_pred_proba[:, 1])
                axes[1, 1].plot(fpr, tpr, linewidth=2, label=f'ROC曲线 (AUC = {roc_auc:.3f})')
                axes[1, 1].plot([0, 1], [0, 1], 'k--', linewidth=1, label='随机分类器')
                axes[1, 1].set_xlabel('假正率 (FPR)')
                axes[1, 1].set_ylabel('真正率 (TPR)')
                axes[1, 1].set_title('ROC曲线')
                axes[1, 1].legend()
                axes[1, 1].grid(True, alpha=0.3)
            else:
                # 类别分布
                unique, counts = np.unique(y_test_split, return_counts=True)
                class_labels = [class_names[i] for i in unique]
                axes[1, 1].pie(counts, labels=class_labels, autopct='%1.1f%%', startangle=90)
                axes[1, 1].set_title('测试集类别分布')
            
            plt.tight_layout()
            plot_base64 = create_plot_base64(fig)
        else:
            plot_base64 = None
        
        return {
            "algorithm": algorithm,
            "model_trained": True,
            "class_names": class_names,
            "training_samples": len(X_train_split),
            "test_samples": len(X_test_split),
            "evaluation_metrics": {
                "accuracy": float(accuracy),
                "balanced_accuracy": float(balanced_accuracy),
                "precision": float(precision),
                "recall": float(recall),
                "f1_score": float(f1),
                "roc_auc": float(roc_auc) if roc_auc is not None else None,
                "matthews_corrcoef": float(mcc),
                "cohen_kappa": float(kappa)
            },
            "per_class_metrics": per_class_metrics,
            "confusion_matrix": conf_matrix.tolist(),
            "classification_report": class_report,
            "feature_importance": feature_importance,
            "feature_selection_info": feature_selection_info,
            "hyperparameters": hyperparams,
            "hyperparameter_tuning_info": tuning_info,
            "cross_validation_scores": cv_scores,
            "auto_selection_results": auto_selection_results,
            "performance_monitoring": performance_monitoring,
            "model_interpretation": model_interpretation,
            "predictions": y_pred.tolist(),
            "prediction_probabilities": y_pred_proba.tolist() if y_pred_proba is not None else None,
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"模型训练错误: {str(e)}"}


@mcp.tool()
def predict_classification(X_new: List[List[float]], model_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Predict labels for new samples with a previously trained classifier.

    Args:
        X_new: New feature rows to classify.
        model_data: The result dictionary returned by the training tool.

    Returns:
        A result dictionary. The current build always reports that
        prediction is unavailable, because the training tool returns
        metrics only and does not serialize the fitted model object.
    """
    # NOTE: simplified placeholder — real prediction would require saving
    # and reloading the complete fitted model, which this version omits.
    try:
        message = "预测功能需要完整的模型序列化支持，当前版本仅支持训练和评估"
        return {"error": message}

    except Exception as e:
        return {"error": f"预测错误: {str(e)}"}


@mcp.tool()
def compare_classification_algorithms(X_data: List[List[float]], y_data: List[Union[int, str]], 
                                    algorithms: List[str] = None, 
                                    test_size: float = 0.2, random_state: int = 42) -> Dict[str, Any]:
    """
    Compare the performance of several classification algorithms.

    Each algorithm is trained and evaluated via train_classification_model
    on the same split (controlled by random_state); the aggregated metrics
    are rendered into a four-panel comparison figure (accuracy bars, F1
    bars, a polar radar chart over all four metrics, and a ranking table).

    Args:
        X_data: Feature matrix as a list of sample rows.
        y_data: Target labels, one per sample.
        algorithms: Algorithm identifiers to compare. Defaults to
            ['logistic_regression', 'decision_tree', 'random_forest', 'svm', 'knn'].
        test_size: Fraction of samples held out for testing.
        random_state: Seed forwarded to the split and the models.

    Returns:
        Dict with per-algorithm training results, the collected metrics,
        the best algorithm (highest weighted F1), a ranking list and a
        base64-encoded PNG visualization; {"error": ...} on failure.
    """
    try:
        if algorithms is None:
            algorithms = ['logistic_regression', 'decision_tree', 'random_forest', 'svm', 'knn']
        
        results = {}
        comparison_metrics = []
        
        # Train every requested algorithm; a failing algorithm is skipped
        # so one broken configuration does not abort the whole comparison.
        for algorithm in algorithms:
            print(f"训练 {algorithm}...")
            result = train_classification_model(
                X_data, y_data, algorithm=algorithm, 
                test_size=test_size, random_state=random_state
            )
            
            if "error" not in result:
                results[algorithm] = result
                comparison_metrics.append({
                    'algorithm': algorithm,
                    'accuracy': result['evaluation_metrics']['accuracy'],
                    'precision': result['evaluation_metrics']['precision'],
                    'recall': result['evaluation_metrics']['recall'],
                    'f1_score': result['evaluation_metrics']['f1_score']
                })
        
        if not comparison_metrics:
            return {"error": "所有算法都训练失败"}
        
        # Build the comparison figure.
        df_metrics = pd.DataFrame(comparison_metrics)
        
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('分类算法性能比较', fontsize=16)
        
        # Panel 1: accuracy per algorithm.
        bars1 = axes[0, 0].bar(df_metrics['algorithm'], df_metrics['accuracy'], 
                              color='skyblue', alpha=0.8)
        axes[0, 0].set_title('准确率比较')
        axes[0, 0].set_ylabel('准确率')
        axes[0, 0].tick_params(axis='x', rotation=45)
        
        # Numeric labels above each bar.
        for bar, value in zip(bars1, df_metrics['accuracy']):
            axes[0, 0].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.005, 
                           f'{value:.3f}', ha='center', va='bottom')
        
        # Panel 2: F1 score per algorithm.
        bars2 = axes[0, 1].bar(df_metrics['algorithm'], df_metrics['f1_score'], 
                              color='lightcoral', alpha=0.8)
        axes[0, 1].set_title('F1分数比较')
        axes[0, 1].set_ylabel('F1分数')
        axes[0, 1].tick_params(axis='x', rotation=45)
        
        for bar, value in zip(bars2, df_metrics['f1_score']):
            axes[0, 1].text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.005, 
                           f'{value:.3f}', ha='center', va='bottom')
        
        # Panel 3: radar chart over all four metrics for every algorithm.
        metrics_to_plot = ['accuracy', 'precision', 'recall', 'f1_score']
        angles = np.linspace(0, 2 * np.pi, len(metrics_to_plot), endpoint=False).tolist()
        angles += angles[:1]  # close the polygon
        
        # The Cartesian axes that plt.subplots placed at this position must
        # be removed before adding the polar axes; otherwise an empty frame
        # is drawn underneath the radar chart (plt.subplot no longer reuses
        # an existing axes with a different projection).
        axes[1, 0].remove()
        ax_radar = fig.add_subplot(2, 2, 3, projection='polar')
        
        colors = plt.cm.Set1(np.linspace(0, 1, len(df_metrics)))
        for i, (_, row) in enumerate(df_metrics.iterrows()):
            values = [row[metric] for metric in metrics_to_plot]
            values += values[:1]  # close the polygon
            
            ax_radar.plot(angles, values, 'o-', linewidth=2, 
                         label=row['algorithm'], color=colors[i])
            ax_radar.fill(angles, values, alpha=0.25, color=colors[i])
        
        ax_radar.set_xticks(angles[:-1])
        ax_radar.set_xticklabels(['准确率', '精确率', '召回率', 'F1分数'])
        ax_radar.set_ylim(0, 1)
        ax_radar.set_title('综合性能比较', pad=20)
        ax_radar.legend(loc='upper right', bbox_to_anchor=(1.3, 1.0))
        
        # Panel 4: ranking table, sorted by weighted F1 (descending).
        df_sorted = df_metrics.sort_values('f1_score', ascending=False)
        
        table_data = []
        for i, (_, row) in enumerate(df_sorted.iterrows()):
            table_data.append([
                i + 1,  # rank
                row['algorithm'],
                f"{row['accuracy']:.3f}",
                f"{row['f1_score']:.3f}"
            ])
        
        axes[1, 1].axis('tight')
        axes[1, 1].axis('off')
        table = axes[1, 1].table(cellText=table_data,
                                colLabels=['排名', '算法', '准确率', 'F1分数'],
                                cellLoc='center',
                                loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        table.scale(1, 2)
        axes[1, 1].set_title('性能排名', pad=20)
        
        plt.tight_layout()
        plot_base64 = create_plot_base64(fig)
        
        # Best algorithm = top row of the F1-sorted frame.
        best_algorithm = df_sorted.iloc[0]['algorithm']
        best_f1 = df_sorted.iloc[0]['f1_score']
        
        return {
            "comparison_results": results,
            "performance_metrics": comparison_metrics,
            "best_algorithm": best_algorithm,
            "best_f1_score": float(best_f1),
            "algorithms_compared": algorithms,
            "ranking": df_sorted.to_dict('records'),
            "visualization": plot_base64
        }
        
    except Exception as e:
        return {"error": f"算法比较错误: {str(e)}"}
if __name__ == "__main__":
    # Launch the MCP server over the stdio transport protocol.
    
    mcp.run(transport="stdio")