#!/usr/bin/env python3
"""
非参数回归MCP服务器

提供完整的非参数回归分析功能，包括：
1. loess_regression - LOESS局部回归
2. kernel_regression - 核回归(Nadaraya-Watson)
3. spline_regression - 样条回归
4. local_polynomial_regression - 局部多项式回归
5. auto_select_regression - 自动选择最佳回归方法
6. predict_values - 使用拟合模型预测新值
7. compare_methods - 比较多种回归方法
8. regression_diagnostics - 回归诊断
9. sample_size_analysis - 样本量分析
10. method_recommendation - 方法推荐
11. plot_regression - 回归拟合图
12. plot_residuals - 残差图
13. plot_comparison - 方法比较图
14. plot_diagnostics - 综合诊断图
"""

import asyncio
import json
import logging
import sys
from typing import Any, Dict, List, Optional, Union, Tuple
import traceback
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')  # 使用非交互式后端
import seaborn as sns
import base64
import io
from scipy import stats, interpolate
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, r2_score
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.nonparametric.kernel_regression import KernelReg

from mcp.server import NotificationOptions, Server
from mcp.server.models import InitializationOptions
from mcp.server.stdio import stdio_server
from mcp.types import (
    Tool,
    TextContent,
    ImageContent,
    EmbeddedResource
)

# Chinese font support for matplotlib figures
def setup_chinese_fonts():
    """
    Configure matplotlib to render Chinese text, with font detection and
    a fallback chain.

    Returns:
        The name of the highest-priority usable font.
    """
    import matplotlib.font_manager as fm

    # Enumerate available fonts. Read fontManager.ttflist directly to avoid
    # APIs that may trigger a font-cache _rebuild.
    try:
        available_fonts = [f.name for f in fm.fontManager.ttflist]
    except AttributeError:
        # fontManager unavailable: fall back to get_font_names(), which
        # already yields plain name strings (the original code called
        # `.name` on them, which always raised AttributeError).
        try:
            available_fonts = list(fm.get_font_names())
        except Exception:
            # Last resort: assume a list of commonly installed fonts.
            available_fonts = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial', 'DejaVu Sans']

    # Candidate CJK-capable fonts, highest priority first.
    chinese_fonts = [
        'SimHei',           # SimHei
        'Microsoft YaHei',  # Microsoft YaHei
        'SimSun',           # SimSun
        'KaiTi',            # KaiTi
        'FangSong',         # FangSong
        'STSong',           # STSong
        'STKaiti',          # STKaiti
        'STHeiti',          # STHeiti
        'STFangsong',       # STFangsong
        'LiSu',             # LiSu
        'YouYuan',          # YouYuan
        'NSimSun',          # NSimSun
        'Arial Unicode MS', # Arial Unicode MS (covers CJK)
        'Noto Sans CJK SC', # Google Noto CJK
        'Source Han Sans SC', # Source Han Sans
        'WenQuanYi Micro Hei', # WenQuanYi Micro Hei
        'DejaVu Sans'       # final fallback
    ]

    # Keep only candidates actually installed; set lookup is O(1) per test.
    installed = set(available_fonts)
    selected_fonts = [font for font in chinese_fonts if font in installed]

    # With no CJK font found, fall back to defaults and warn the operator.
    if not selected_fonts:
        logger.warning("未找到可用的中文字体，中文可能显示为方框")
        selected_fonts = ['DejaVu Sans', 'Arial', 'sans-serif']
    else:
        logger.info(f"找到可用中文字体: {selected_fonts[:3]}")

    # Apply the fallback chain; matplotlib skips names it cannot resolve.
    plt.rcParams['font.sans-serif'] = selected_fonts
    plt.rcParams['axes.unicode_minus'] = False

    # Global font sizing for all figures produced by this server.
    plt.rcParams['font.size'] = 10
    plt.rcParams['axes.titlesize'] = 14
    plt.rcParams['axes.labelsize'] = 12
    plt.rcParams['xtick.labelsize'] = 10
    plt.rcParams['ytick.labelsize'] = 10
    plt.rcParams['legend.fontsize'] = 10

    return selected_fonts[0] if selected_fonts else 'DejaVu Sans'

# Logging configuration (module-level logger used by all helpers below,
# so it must be created before the font-initialization block).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Safe font initialization: set rcParams directly instead of calling
# setup_chinese_fonts(), to avoid triggering font_manager's _rebuild.
try:
    # Hard-coded fallback chain; matplotlib skips unavailable font names.
    plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
    selected_font = 'SimHei'
    logger.info(f"字体设置成功: {selected_font}")
except Exception as e:
    logger.warning(f"字体设置失败，使用默认字体: {str(e)}")
    selected_font = 'DejaVu Sans'

sns.set_style("whitegrid")

# Create the MCP server instance
server = Server("nonparametric-regression")

# Module-level cache of fitted models
fitted_models = {}

def ensure_chinese_display():
    """
    Re-apply the CJK-capable font settings to matplotlib.

    Called before every plot: some matplotlib operations reset rcParams,
    which would make Chinese labels render as boxes.

    Returns:
        True if the settings were applied, False if applying them failed.
    """
    try:
        # Force a CJK-capable font fallback chain and sizing defaults.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun', 'Arial', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False
        plt.rcParams['font.size'] = 10
        plt.rcParams['axes.titlesize'] = 14
        plt.rcParams['axes.labelsize'] = 12
        plt.rcParams['xtick.labelsize'] = 10
        plt.rcParams['ytick.labelsize'] = 10
        plt.rcParams['legend.fontsize'] = 10
        return True
    except Exception as e:
        logger.warning(f"字体设置警告: {str(e)}")
        # Bug fix: previously returned True even on failure, which made the
        # return value meaningless to callers.
        return False

def create_plot_base64(fig, dpi=300):
    """
    Render a matplotlib figure to a base64-encoded PNG string.

    Args:
        fig: The matplotlib figure object to render.
        dpi: Output resolution in dots per inch.

    Returns:
        The PNG image encoded as a base64 string.
    """
    # Re-apply CJK font settings so labels render correctly.
    ensure_chinese_display()

    buf = io.BytesIO()
    try:
        fig.savefig(buf, format='png', dpi=dpi, bbox_inches='tight',
                    facecolor='white', edgecolor='none')
        return base64.b64encode(buf.getvalue()).decode('utf-8')
    except Exception as e:
        logger.error(f"转换图片为base64时出错: {str(e)}")
        raise
    finally:
        buf.close()
        plt.close(fig)  # always release the figure's memory

class NonparametricRegression:
    """
    Collection of nonparametric regression methods.

    Every fitting method validates its input, sorts it by x, fits a smoother
    and returns a JSON-serializable result dict containing the fitted curve,
    residual statistics, the parameters used and a simplified confidence band.
    """

    def __init__(self):
        # Scaler retained for API compatibility; methods currently fit raw x/y.
        self.scaler = StandardScaler()
        # Per-instance model store (distinct from the module-level cache).
        self.fitted_models = {}

    def validate_data(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Validate and preprocess input data.

        Args:
            x: Independent variable values.
            y: Dependent variable values.

        Returns:
            (x, y) as float arrays, sorted in ascending x order.

        Raises:
            ValueError: If lengths differ, fewer than 3 points are given,
                or the data contains NaN or infinite values.
        """
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)

        if len(x) != len(y):
            raise ValueError("x和y的长度必须相同")

        if len(x) < 3:
            raise ValueError("样本量至少需要3个观测值")

        # Missing values are rejected rather than silently dropped.
        if np.any(np.isnan(x)) or np.any(np.isnan(y)):
            raise ValueError("数据中不能包含缺失值")

        # Infinite values are rejected as well.
        if np.any(np.isinf(x)) or np.any(np.isinf(y)):
            raise ValueError("数据中不能包含无穷值")

        # Sort by x so downstream smoothers can assume ordered input.
        sort_idx = np.argsort(x)
        return x[sort_idx], y[sort_idx]

    def _summary_stats(self, y: np.ndarray, fitted_y: np.ndarray) -> Dict[str, Any]:
        """
        Shared residual statistics for all fitting methods: residuals, MSE,
        RMSE, R² and a simplified ±1.96·σ(residuals) band around the fit.
        """
        residuals = y - fitted_y
        mse = np.mean(residuals**2)
        residual_std = np.std(residuals)
        return {
            'residuals': residuals,
            'mse': float(mse),
            'r_squared': float(r2_score(y, fitted_y)),
            'rmse': float(np.sqrt(mse)),
            'confidence_intervals': {
                'lower': (fitted_y - 1.96 * residual_std).tolist(),
                'upper': (fitted_y + 1.96 * residual_std).tolist()
            }
        }

    def loess_regression(self, x: np.ndarray, y: np.ndarray,
                        span: float = 0.75, degree: int = 2,
                        robust: bool = True, iterations: int = 3) -> Dict[str, Any]:
        """
        LOESS local regression via statsmodels' lowess.

        Args:
            x: Independent variable.
            y: Dependent variable.
            span: Smoothing fraction in (0, 1].
            degree: Recorded in the result for reference only — statsmodels'
                lowess fits locally linear models, so this value does not
                affect the fit.
            robust: Whether to run robustifying iterations.
            iterations: Number of robustifying iterations when robust=True.

        Returns:
            Result dict with fitted curve, residual statistics and parameters.
        """
        try:
            x, y = self.validate_data(x, y)

            # Single call: robust=False is simply zero robustifying
            # iterations (the two branches previously only differed in `it`).
            smoothed = lowess(y, x, frac=span, it=iterations if robust else 0,
                              return_sorted=True, is_sorted=True)
            fitted_x = smoothed[:, 0]
            fitted_y = smoothed[:, 1]

            stats_ = self._summary_stats(y, fitted_y)

            return {
                'method': 'LOESS',
                'fitted_x': fitted_x.tolist(),
                'fitted_y': fitted_y.tolist(),
                'fitted_model': {'fitted_x': fitted_x, 'fitted_y': fitted_y},
                'residuals': stats_['residuals'].tolist(),
                'mse': stats_['mse'],
                'r_squared': stats_['r_squared'],
                'rmse': stats_['rmse'],
                'parameters': {
                    'span': span,
                    'degree': degree,
                    'robust': robust,
                    'iterations': iterations
                },
                'confidence_intervals': stats_['confidence_intervals'],
                'sample_size': len(x)
            }

        except Exception as e:
            logger.error(f"LOESS回归错误: {str(e)}")
            raise

    def _select_bandwidth_cv(self, x: np.ndarray, y: np.ndarray, kernel: str = 'gaussian') -> float:
        """
        Grid-search a bandwidth that minimizes the in-sample MSE.

        NOTE(review): despite the name this is not true cross-validation —
        the score is the training MSE, which favors small bandwidths; kept
        as-is to preserve behavior. The `kernel` argument is currently
        unused (KernelReg uses its default kernel).
        """
        # Candidate bandwidths, log-spaced over 1%–100% of the x range.
        h_range = np.logspace(-2, 0, 20) * (np.max(x) - np.min(x))

        best_h = h_range[0]
        best_score = float('inf')

        for h in h_range:
            try:
                kr = KernelReg(y, x, var_type='c', reg_type='ll', bw=[h])
                fitted_y, _ = kr.fit(x)
                mse = np.mean((y - fitted_y)**2)
                if mse < best_score:
                    best_score = mse
                    best_h = h
            except Exception:
                # Skip bandwidths where the fit fails (e.g. singular systems).
                continue

        return float(best_h)

    def kernel_regression(self, x: np.ndarray, y: np.ndarray,
                         bandwidth: Optional[float] = None,
                         kernel: str = 'gaussian') -> Dict[str, Any]:
        """
        Kernel regression via statsmodels KernelReg (locally linear,
        reg_type='ll' — not a Nadaraya-Watson local-constant estimator).

        Args:
            x: Independent variable.
            y: Dependent variable.
            bandwidth: Kernel bandwidth; selected automatically when None.
            kernel: Recorded in the result for reference only — KernelReg
                uses its default kernel regardless of this value.

        Returns:
            Result dict with fitted curve, residual statistics and parameters.
        """
        try:
            x, y = self.validate_data(x, y)

            # Automatic bandwidth selection when none is supplied.
            if bandwidth is None:
                bandwidth = self._select_bandwidth_cv(x, y, kernel)

            kr = KernelReg(y, x, var_type='c', reg_type='ll', bw=[bandwidth])
            fitted_y, _ = kr.fit(x)

            stats_ = self._summary_stats(y, fitted_y)

            return {
                'method': 'Kernel Regression',
                'fitted_x': x.tolist(),
                'fitted_y': fitted_y.tolist(),
                'fitted_model': {'fitted_x': x, 'fitted_y': fitted_y, 'bandwidth': bandwidth, 'kernel': kernel},
                'residuals': stats_['residuals'].tolist(),
                'mse': stats_['mse'],
                'r_squared': stats_['r_squared'],
                'rmse': stats_['rmse'],
                'parameters': {
                    'bandwidth': float(bandwidth),
                    'kernel': kernel
                },
                'confidence_intervals': stats_['confidence_intervals'],
                'sample_size': len(x)
            }

        except Exception as e:
            logger.error(f"核回归错误: {str(e)}")
            raise

    def spline_regression(self, x: np.ndarray, y: np.ndarray,
                         spline_type: str = 'cubic',
                         smoothing: Optional[float] = None,
                         knots: Optional[int] = None) -> Dict[str, Any]:
        """
        Spline regression.

        Args:
            x: Independent variable.
            y: Dependent variable.
            spline_type: 'cubic' for a B-spline; any other value gives
                piecewise-linear interpolation.
            smoothing: Smoothing factor `s` for the cubic spline. None keeps
                scipy's default (an interpolating spline, which yields zero
                residuals on the training data).
            knots: Recorded in the result for reference only — not used by
                the current implementation.

        Returns:
            Result dict with fitted curve, residual statistics and parameters.
        """
        try:
            x, y = self.validate_data(x, y)

            if spline_type == 'cubic':
                # splrep treats s=None the same as omitting it, so a single
                # call covers both the smoothing and interpolating cases.
                tck = interpolate.splrep(x, y, s=smoothing)
                fitted_y = interpolate.splev(x, tck)
            else:
                # Linear spline: piecewise-linear interpolation.
                f = interpolate.interp1d(x, y, kind='linear', fill_value='extrapolate')
                fitted_y = f(x)

            stats_ = self._summary_stats(y, fitted_y)

            return {
                'method': f'{spline_type.title()} Spline',
                'fitted_x': x.tolist(),
                'fitted_y': fitted_y.tolist(),
                'fitted_model': {'fitted_x': x, 'fitted_y': fitted_y, 'spline_type': spline_type},
                'residuals': stats_['residuals'].tolist(),
                'mse': stats_['mse'],
                'r_squared': stats_['r_squared'],
                'rmse': stats_['rmse'],
                'parameters': {
                    'spline_type': spline_type,
                    'smoothing': smoothing,
                    'knots': knots
                },
                'confidence_intervals': stats_['confidence_intervals'],
                'sample_size': len(x)
            }

        except Exception as e:
            logger.error(f"样条回归错误: {str(e)}")
            raise

    def local_polynomial(self, x: np.ndarray, y: np.ndarray,
                        degree: int = 1, bandwidth: Optional[float] = None) -> Dict[str, Any]:
        """
        Local polynomial regression with Gaussian distance weights.

        Args:
            x: Independent variable.
            y: Dependent variable.
            degree: Polynomial degree of each local fit.
            bandwidth: Gaussian weight bandwidth; defaults to 30% of the
                x range when None.

        Returns:
            Result dict with fitted curve, residual statistics and parameters.
        """
        try:
            x, y = self.validate_data(x, y)

            if bandwidth is None:
                bandwidth = 0.3 * (np.max(x) - np.min(x))

            fitted_y = np.zeros_like(y)

            for i, xi in enumerate(x):
                # Gaussian weights by distance from the evaluation point.
                distances = np.abs(x - xi)
                weights = np.exp(-(distances / bandwidth)**2)

                # Design matrix centered at xi. The previous degree==1
                # special case produced the same matrix, so the general
                # form covers all degrees.
                X = np.column_stack([np.ones(len(x))] + [(x - xi)**j for j in range(1, degree + 1)])

                # Weighted least squares at xi.
                W = np.diag(weights)
                try:
                    beta = np.linalg.solve(X.T @ W @ X, X.T @ W @ y)
                    fitted_y[i] = beta[0]  # intercept = local fit at xi
                except np.linalg.LinAlgError:
                    # Singular system: fall back to a weighted mean.
                    fitted_y[i] = np.average(y, weights=weights)

            stats_ = self._summary_stats(y, fitted_y)

            return {
                'method': f'Local Polynomial (degree {degree})',
                'fitted_x': x.tolist(),
                'fitted_y': fitted_y.tolist(),
                'fitted_model': {'fitted_x': x, 'fitted_y': fitted_y, 'degree': degree, 'bandwidth': bandwidth},
                'residuals': stats_['residuals'].tolist(),
                'mse': stats_['mse'],
                'r_squared': stats_['r_squared'],
                'rmse': stats_['rmse'],
                'parameters': {
                    'degree': degree,
                    'bandwidth': float(bandwidth)
                },
                'confidence_intervals': stats_['confidence_intervals'],
                'sample_size': len(x)
            }

        except Exception as e:
            logger.error(f"局部多项式回归错误: {str(e)}")
            raise

    def auto_select_method(self, x: np.ndarray, y: np.ndarray) -> Dict[str, Any]:
        """
        Fit every available method and return the one with the lowest MSE.

        NOTE(review): the criterion is in-sample MSE, so an interpolating
        spline (zero residuals) will usually win — kept to preserve behavior.

        Args:
            x: Independent variable.
            y: Dependent variable.

        Returns:
            Result dict of the best method, plus an 'auto_selection' entry
            describing the choice.

        Raises:
            ValueError: If every candidate method fails.
        """
        try:
            x, y = self.validate_data(x, y)

            # Candidate fits are deferred via lambdas so failures can be
            # caught per-method.
            methods = [
                ('LOESS', lambda: self.loess_regression(x, y)),
                ('Kernel', lambda: self.kernel_regression(x, y)),
                ('Cubic Spline', lambda: self.spline_regression(x, y, 'cubic')),
                ('Local Polynomial', lambda: self.local_polynomial(x, y))
            ]

            best_method = None
            best_score = float('inf')
            best_result = None

            for method_name, method_func in methods:
                try:
                    result = method_func()
                    score = result['mse']
                    if score < best_score:
                        best_score = score
                        best_method = method_name
                        best_result = result
                except Exception as e:
                    # A failing method is logged and skipped, not fatal.
                    logger.warning(f"方法 {method_name} 失败: {str(e)}")
                    continue

            if best_result is None:
                raise ValueError("所有方法都失败了")

            # Attach selection metadata to the winning result.
            best_result['auto_selection'] = {
                'selected_method': best_method,
                'selection_criterion': 'MSE',
                'best_score': float(best_score)
            }

            return best_result

        except Exception as e:
            logger.error(f"自动选择方法错误: {str(e)}")
            raise

    def predict(self, model_result: Dict[str, Any], x_new: np.ndarray) -> np.ndarray:
        """
        Predict new values from a previously fitted model result.

        Predictions are linear interpolations of the stored fitted curve
        (with linear extrapolation outside its range), regardless of which
        method produced the fit.

        Args:
            model_result: A result dict returned by one of the fit methods.
            x_new: New x values to predict at.

        Returns:
            Predicted y values as a numpy array.
        """
        try:
            x_new = np.asarray(x_new, dtype=float)
            fitted_model = model_result['fitted_model']
            fitted_x = np.array(fitted_model['fitted_x'])
            fitted_y = np.array(fitted_model['fitted_y'])

            # Linear interpolation over the fitted curve.
            f = interpolate.interp1d(fitted_x, fitted_y, kind='linear',
                                   fill_value='extrapolate', bounds_error=False)
            return f(x_new)

        except Exception as e:
            logger.error(f"预测错误: {str(e)}")
            raise

# Module-level regression analyzer shared by the tool handlers
regression_analyzer = NonparametricRegression()

def detect_data_structure(data_input: Union[str, List, Dict, np.ndarray]) -> Dict[str, Any]:
    """
    Auto-detect the data structure: single sample vs. multiple samples.

    Mirrors the handling used by the data-preprocessing service (pandas-based
    numeric coercion) so dict-formatted payloads avoid encoding/mojibake issues.

    Args:
        data_input: Input data — a JSON string, list, dict or ndarray.

    Returns:
        Detection result dict with 'data_type' ('single_sample' or
        'multi_sample'), 'sample_count', 'structure', 'recommendations'
        and 'parsed_data' (numpy float arrays ready for regression).

    Raises:
        ValueError: If the input cannot be parsed or its format is unsupported.
    """
    try:
        # Parse JSON strings; all other types are used as-is.
        if isinstance(data_input, str):
            try:
                data = json.loads(data_input)
            except json.JSONDecodeError as e:
                logger.error(f"JSON解析失败: {str(e)}")
                raise ValueError(f"无效的JSON字符串: {str(e)}")
        else:
            data = data_input

        # Default skeleton; branches below fill it in and return early.
        detection_result = {
            'data_type': None,
            'sample_count': 0,
            'structure': None,
            'recommendations': [],
            'parsed_data': None
        }

        if isinstance(data, dict):
            # Envelope check: output of the data-preprocessing service.
            if 'success' in data and 'data' in data and 'output_format' in data:
                logger.info(f"检测到数据预处理服务输出格式，output_format: {data.get('output_format')}")

                preprocessed_data = data['data']
                output_format = data['output_format']

                # Route through pandas for numeric coercion to avoid encoding issues.
                if output_format == 'matrix':
                    # matrix format: [[row1], [row2], ...]
                    try:
                        df = pd.DataFrame(preprocessed_data)
                        # Coerce to numeric; non-numeric entries become NaN.
                        numeric_df = df.apply(pd.to_numeric, errors='coerce')

                        if len(numeric_df) >= 2:
                            # First row is x, second row is y; further rows are ignored.
                            x = numeric_df.iloc[0].dropna().values.astype(float)
                            y = numeric_df.iloc[1].dropna().values.astype(float)

                            # Truncate to a common length (NaNs may drop unevenly).
                            min_len = min(len(x), len(y))
                            x = x[:min_len]
                            y = y[:min_len]

                            detection_result.update({
                                'data_type': 'single_sample',
                                'sample_count': 1,
                                'structure': 'preprocessed_matrix',
                                'sample_size': len(y),
                                'parsed_data': {'x': x, 'y': y},
                                'recommendations': ['适合进行单样本非参数回归分析', '数据来自预处理服务']
                            })
                            return detection_result
                        elif len(numeric_df) == 1:
                            # Single row: treat as y and synthesize a 0..n-1 index for x.
                            y = numeric_df.iloc[0].dropna().values.astype(float)
                            x = np.arange(len(y), dtype=float)
                            detection_result.update({
                                'data_type': 'single_sample',
                                'sample_count': 1,
                                'structure': 'preprocessed_single_row',
                                'sample_size': len(y),
                                'parsed_data': {'x': x, 'y': y},
                                'recommendations': ['适合进行时间序列回归分析', '数据来自预处理服务']
                            })
                            return detection_result
                        else:
                            raise ValueError("matrix格式数据至少需要1行")
                    except Exception as e:
                        logger.error(f"matrix格式数据处理失败: {str(e)}")
                        raise ValueError(f"matrix格式数据处理失败: {str(e)}")

                elif output_format == 'groups':
                    # groups format: [[group1], [group2], ...]
                    try:
                        if len(preprocessed_data) == 1:
                            # Single group: treat as a time series (index as x).
                            series = pd.Series(preprocessed_data[0])
                            y = pd.to_numeric(series, errors='coerce').dropna().values.astype(float)
                            x = np.arange(len(y), dtype=float)
                            detection_result.update({
                                'data_type': 'single_sample',
                                'sample_count': 1,
                                'structure': 'preprocessed_single_group',
                                'sample_size': len(y),
                                'parsed_data': {'x': x, 'y': y},
                                'recommendations': ['适合进行时间序列回归分析', '数据来自预处理服务']
                            })
                            return detection_result
                        else:
                            # Multiple groups: coerce each group separately.
                            groups = {}
                            for i, group in enumerate(preprocessed_data):
                                series = pd.Series(group)
                                numeric_data = pd.to_numeric(series, errors='coerce').dropna().values.astype(float)
                                groups[f'group_{i+1}'] = numeric_data

                            detection_result.update({
                                'data_type': 'multi_sample',
                                'sample_count': len(preprocessed_data),
                                'structure': 'preprocessed_groups',
                                'group_names': list(groups.keys()),
                                'group_sizes': {k: len(v) for k, v in groups.items()},
                                'parsed_data': groups,
                                'recommendations': ['适合进行多样本比较分析', '可以对每个组分别进行回归分析', '数据来自预处理服务']
                            })
                            return detection_result
                    except Exception as e:
                        logger.error(f"groups格式数据处理失败: {str(e)}")
                        raise ValueError(f"groups格式数据处理失败: {str(e)}")

                else:
                    raise ValueError(f"不支持的数据预处理输出格式: {output_format}")

            elif 'x' in data and 'y' in data:
                # Single-sample dict format: {'x': [...], 'y': [...]}
                try:
                    # pandas Series coercion avoids encoding problems.
                    x_series = pd.Series(data['x'])
                    y_series = pd.Series(data['y'])

                    x = pd.to_numeric(x_series, errors='coerce').dropna().values.astype(float)
                    y = pd.to_numeric(y_series, errors='coerce').dropna().values.astype(float)

                    # Truncate to a common length (NaNs may drop unevenly).
                    min_len = min(len(x), len(y))
                    x = x[:min_len]
                    y = y[:min_len]

                    detection_result.update({
                        'data_type': 'single_sample',
                        'sample_count': 1,
                        'structure': 'xy_pairs',
                        'sample_size': len(x),
                        'parsed_data': {'x': x, 'y': y},
                        'recommendations': ['适合进行单样本非参数回归分析']
                    })
                    return detection_result
                except Exception as e:
                    logger.error(f"xy格式数据处理失败: {str(e)}")
                    raise ValueError(f"xy格式数据处理失败: {str(e)}")

            elif all(isinstance(v, (list, np.ndarray)) for v in data.values()):
                # Multi-sample dict format: {'group1': [...], 'group2': [...], ...}
                try:
                    groups = {}
                    for k, v in data.items():
                        series = pd.Series(v)
                        numeric_data = pd.to_numeric(series, errors='coerce').dropna().values.astype(float)
                        groups[k] = numeric_data

                    detection_result.update({
                        'data_type': 'multi_sample',
                        'sample_count': len(groups),
                        'structure': 'grouped_data',
                        'group_names': list(groups.keys()),
                        'group_sizes': {k: len(v) for k, v in groups.items()},
                        'parsed_data': groups,
                        'recommendations': ['适合进行多样本比较分析', '可以对每个组分别进行回归分析']
                    })
                    return detection_result
                except Exception as e:
                    logger.error(f"分组数据处理失败: {str(e)}")
                    raise ValueError(f"分组数据处理失败: {str(e)}")

        elif isinstance(data, (list, np.ndarray)):
            try:
                # Coerce array-like input via pandas.
                if isinstance(data, list):
                    data_array = pd.DataFrame(data).apply(pd.to_numeric, errors='coerce').values
                else:
                    data_array = np.array(data)

                if data_array.ndim == 1:
                    # 1-D array: single-sample time series (index as x).
                    series = pd.Series(data_array)
                    y = pd.to_numeric(series, errors='coerce').dropna().values.astype(float)
                    x = np.arange(len(y), dtype=float)

                    detection_result.update({
                        'data_type': 'single_sample',
                        'sample_count': 1,
                        'structure': 'time_series',
                        'sample_size': len(y),
                        'parsed_data': {'x': x, 'y': y},
                        'recommendations': ['适合进行时间序列回归分析', '建议使用LOESS或样条回归']
                    })
                    return detection_result

                elif data_array.ndim == 2:
                    if data_array.shape[1] == 2:
                        # 2-D array with 2 columns: single-sample (x, y) pairs.
                        df = pd.DataFrame(data_array, columns=['x', 'y'])
                        numeric_df = df.apply(pd.to_numeric, errors='coerce').dropna()

                        x = numeric_df['x'].values.astype(float)
                        y = numeric_df['y'].values.astype(float)

                        detection_result.update({
                            'data_type': 'single_sample',
                            'sample_count': 1,
                            'structure': 'xy_matrix',
                            'sample_size': len(x),
                            'parsed_data': {'x': x, 'y': y},
                            'recommendations': ['适合进行单样本非参数回归分析']
                        })
                        return detection_result
                    else:
                        # More than 2 columns: one sample per column.
                        df = pd.DataFrame(data_array)
                        numeric_df = df.apply(pd.to_numeric, errors='coerce')

                        groups = {}
                        for i in range(numeric_df.shape[1]):
                            col_data = numeric_df.iloc[:, i].dropna().values.astype(float)
                            groups[f'sample_{i+1}'] = col_data

                        detection_result.update({
                            'data_type': 'multi_sample',
                            'sample_count': len(groups),
                            'structure': 'matrix_columns',
                            'group_names': list(groups.keys()),
                            'group_sizes': {k: len(v) for k, v in groups.items()},
                            'parsed_data': groups,
                            'recommendations': ['适合进行多样本比较分析', '可以对每个样本分别进行回归分析']
                        })
                        return detection_result
            except Exception as e:
                logger.error(f"数组数据处理失败: {str(e)}")
                raise ValueError(f"数组数据处理失败: {str(e)}")

        else:
            raise ValueError("不支持的数据格式")

        # NOTE(review): a dict matching none of the branches above (and a
        # 3-D+ array) falls through to here with data_type=None — confirm
        # callers handle this case.
        return detection_result

    except Exception as e:
        logger.error(f"数据结构检测错误: {str(e)}")
        raise ValueError(f"数据结构检测错误: {str(e)}")

def auto_regression_analysis(data_input: Union[str, List, Dict, np.ndarray], 
                           method: str = 'auto') -> Dict[str, Any]:
    """
    Automatic regression analysis: chooses the analysis flow based on the
    detected data structure (single sample vs. multiple samples).

    Args:
        data_input: input data (JSON string, list, dict or ndarray)
        method: regression method ('auto', 'loess', 'kernel', 'spline', 'local_poly')

    Returns:
        Result dict with keys 'data_detection', 'regression_results',
        'visualizations' and 'summary'.

    Raises:
        ValueError: for an unsupported method or any analysis failure.
    """
    def _fit(x: np.ndarray, y: np.ndarray) -> Dict[str, Any]:
        """Dispatch a single (x, y) fit to the requested regression method."""
        # Single dispatch table replaces the previously duplicated if/elif
        # chains in the single-sample and multi-sample branches.
        dispatch = {
            'auto': regression_analyzer.auto_select_method,
            'loess': regression_analyzer.loess_regression,
            'kernel': regression_analyzer.kernel_regression,
            'spline': regression_analyzer.spline_regression,
            'local_poly': regression_analyzer.local_polynomial,
        }
        if method not in dispatch:
            raise ValueError(f"不支持的回归方法: {method}")
        return dispatch[method](x, y)

    try:
        # Detect whether the input is a single sample or multiple samples.
        detection = detect_data_structure(data_input)
        
        analysis_result = {
            'data_detection': detection,
            'regression_results': {},
            'visualizations': {},
            'summary': {}
        }
        
        if detection['data_type'] == 'single_sample':
            # Single-sample regression on the parsed (x, y) pair.
            x = detection['parsed_data']['x']
            y = detection['parsed_data']['y']
            result = _fit(x, y)
            
            analysis_result['regression_results']['single_sample'] = result
            analysis_result['summary'] = {
                'analysis_type': '单样本非参数回归',
                'method_used': result['method'],
                'sample_size': len(x),
                'r_squared': result['r_squared'],
                'rmse': result['rmse']
            }
        
        elif detection['data_type'] == 'multi_sample':
            # Multi-sample: fit each group independently against its
            # positional index (each group is treated as a time series).
            groups = detection['parsed_data']
            group_results = {}
            
            for group_name, group_data in groups.items():
                x = np.arange(len(group_data))
                group_results[group_name] = _fit(x, group_data)
            
            analysis_result['regression_results']['multi_sample'] = group_results
            
            # Aggregate fit quality across all groups.
            avg_r2 = np.mean([r['r_squared'] for r in group_results.values()])
            avg_rmse = np.mean([r['rmse'] for r in group_results.values()])
            
            analysis_result['summary'] = {
                'analysis_type': '多样本非参数回归',
                'group_count': len(groups),
                'average_r_squared': avg_r2,
                'average_rmse': avg_rmse,
                'group_performance': {name: {'r_squared': r['r_squared'], 'rmse': r['rmse']} 
                                    for name, r in group_results.items()}
            }
        
        return analysis_result
        
    except Exception as e:
        raise ValueError(f"自动回归分析错误: {str(e)}")

def parse_data_input(data_input: Union[str, List, Dict], province_name: Optional[str] = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Parse the data input, supporting multiple formats.

    Kept consistent with the data-preprocessing service's handling
    (pandas-based numeric coercion) to avoid mojibake issues with
    dict-formatted data.

    Supported inputs:
      * a JSON string encoding any of the forms below
      * preprocessing-service envelope:
        {'success': ..., 'data': ..., 'output_format': 'matrix'|'groups', ...}
      * standard dict: {'x': [...], 'y': [...]}
      * list pair: [[x_values], [y_values]]

    Args:
        data_input: the data; a JSON string, list or dict
        province_name: province name used to pick a specific row from
            matrix-format data (only relevant for 'matrix' output_format)

    Returns:
        (x, y) tuple of equal-length 1-D float ndarrays

    Raises:
        ValueError: on invalid JSON, unsupported format or empty result
    """
    try:
        # First, decode the raw input into Python objects.
        if isinstance(data_input, str):
            try:
                data = json.loads(data_input)
            except json.JSONDecodeError as e:
                logger.error(f"JSON解析失败: {str(e)}")
                raise ValueError(f"无效的JSON字符串: {str(e)}")
        else:
            data = data_input
        
        # Handle the output format produced by the data-preprocessing service.
        if isinstance(data, dict):
            # Detect the preprocessing-service envelope by its marker keys.
            if 'success' in data and 'data' in data and 'output_format' in data:
                logger.info(f"检测到数据预处理服务输出格式，output_format: {data.get('output_format')}")
                
                preprocessed_data = data['data']
                output_format = data['output_format']
                
                # Use a pandas DataFrame to coerce values safely (avoids mojibake).
                if output_format == 'matrix':
                    # matrix format: [[row1], [row2], ...]
                    # Each row is the time series of one province/region.
                    # Convert to a DataFrame for safe numeric handling.
                    try:
                        df = pd.DataFrame(preprocessed_data)
                        # Coerce to numeric; non-numeric cells become NaN.
                        numeric_df = df.apply(pd.to_numeric, errors='coerce')
                        
                        if len(numeric_df) >= 1:
                            # Row-name metadata (if any) decides which row to use.
                            data_info = data.get('data_info', {})
                            
                            # Important: if preprocessing already extracted a target_row,
                            # the returned data holds exactly one row — use row 0 directly
                            # rather than looking up the original index.
                            if 'target_row' in data_info:
                                # Preprocessing already extracted the row; take row 0.
                                selected_row = 0
                                target_row = data_info['target_row']
                                logger.info(f"数据预处理已提取行 '{target_row}'，使用返回数据的第0行")
                            elif 'row_names' in data_info:
                                row_names = data_info['row_names']
                                logger.info(f"检测到行名信息: {row_names}")
                                
                                # Default to the first row (first province).
                                selected_row = 0
                                
                                # If a province was requested, look up its row index.
                                if province_name:
                                    try:
                                        selected_row = row_names.index(province_name)
                                        logger.info(f"找到指定省份 '{province_name}'，使用第{selected_row}行数据")
                                    except ValueError:
                                        logger.warning(f"找不到指定省份 '{province_name}'，可用省份: {row_names}")
                                        logger.warning(f"使用默认第一行数据: {row_names[0] if row_names else '未知'}")
                                        selected_row = 0
                                else:
                                    # No province requested: fall back to the first row.
                                    logger.info(f"使用第{selected_row}行数据（对应: {row_names[selected_row] if selected_row < len(row_names) else '未知'}）")
                            else:
                                # No row-name metadata: use the first row.
                                selected_row = 0
                                logger.info("没有行名信息，使用第一行数据")
                            
                            # The selected row's values become y.
                            y = numeric_df.iloc[selected_row].dropna().values.astype(float)
                            # Default x: positional index 0..n-1 (time-series index).
                            x = np.arange(len(y), dtype=float)
                            
                            # If column names (e.g. years) are available, prefer them as x.
                            if 'column_names' in data_info:
                                column_names = data_info['column_names']
                                if len(column_names) >= len(y):
                                    try:
                                        # Convert column names to numbers (e.g. years) where possible.
                                        x_from_columns = []
                                        for i, col_name in enumerate(column_names[:len(y)]):
                                            try:
                                                # Direct numeric conversion.
                                                x_val = float(col_name)
                                                x_from_columns.append(x_val)
                                            except (ValueError, TypeError):
                                                # Not numeric: fall back to the positional index.
                                                x_from_columns.append(float(i))
                                        
                                        if len(x_from_columns) == len(y):
                                            x = np.array(x_from_columns, dtype=float)
                                            logger.info(f"使用列名作为x值: {x[:5]}...")
                                    except Exception as e:
                                        logger.warning(f"使用列名作为x值失败，使用索引: {str(e)}")
                            
                            logger.info(f"从matrix格式解析数据: x长度={len(x)}, y长度={len(y)}")
                        else:
                            raise ValueError("matrix格式数据至少需要1行")
                    except Exception as e:
                        logger.error(f"matrix格式数据处理失败: {str(e)}")
                        raise ValueError(f"matrix格式数据处理失败: {str(e)}")
                        
                elif output_format == 'groups':
                    # groups format: [[group1], [group2], ...]
                    # Use pandas coercion to avoid encoding issues.
                    try:
                        if len(preprocessed_data) >= 1 and len(preprocessed_data[0]) > 0:
                            # Only the first group is used; coerce via Series.
                            series = pd.Series(preprocessed_data[0])
                            y = pd.to_numeric(series, errors='coerce').dropna().values.astype(float)
                            x = np.arange(len(y), dtype=float)  # positional index as x
                            
                            logger.info(f"从groups格式解析数据: x长度={len(x)}, y长度={len(y)}")
                        else:
                            raise ValueError("groups格式数据至少需要1组非空数据")
                    except Exception as e:
                        logger.error(f"groups格式数据处理失败: {str(e)}")
                        raise ValueError(f"groups格式数据处理失败: {str(e)}")
                        
                else:
                    raise ValueError(f"不支持的数据预处理输出格式: {output_format}")
                    
            # Handle the standard {'x': [], 'y': []} format.
            elif 'x' in data and 'y' in data:
                try:
                    # Coerce via pandas Series to avoid encoding issues.
                    x_series = pd.Series(data['x'])
                    y_series = pd.Series(data['y'])
                    
                    x = pd.to_numeric(x_series, errors='coerce').dropna().values.astype(float)
                    y = pd.to_numeric(y_series, errors='coerce').dropna().values.astype(float)
                    
                    # Truncate to a common length.
                    # NOTE(review): NaNs are dropped independently in x and y,
                    # which can misalign pairs before truncation — confirm that
                    # callers supply fully numeric, same-length arrays.
                    min_len = min(len(x), len(y))
                    x = x[:min_len]
                    y = y[:min_len]
                    
                    logger.info(f"从标准字典格式解析数据: x长度={len(x)}, y长度={len(y)}")
                except Exception as e:
                    logger.error(f"标准字典格式数据处理失败: {str(e)}")
                    raise ValueError(f"标准字典格式数据处理失败: {str(e)}")
                
            else:
                raise ValueError("字典格式数据必须包含'x'和'y'键，或者是数据预处理服务的输出格式")
                
        elif isinstance(data, list):
            try:
                if len(data) == 2 and all(isinstance(item, list) for item in data):
                    # [[x_values], [y_values]] pair, coerced via pandas.
                    x_series = pd.Series(data[0])
                    y_series = pd.Series(data[1])
                    
                    x = pd.to_numeric(x_series, errors='coerce').dropna().values.astype(float)
                    y = pd.to_numeric(y_series, errors='coerce').dropna().values.astype(float)
                    
                    # Truncate to a common length.
                    min_len = min(len(x), len(y))
                    x = x[:min_len]
                    y = y[:min_len]
                    
                    logger.info(f"从列表格式解析数据: x长度={len(x)}, y长度={len(y)}")
                else:
                    raise ValueError("列表格式数据必须是[[x_values], [y_values]]")
            except Exception as e:
                logger.error(f"列表格式数据处理失败: {str(e)}")
                raise ValueError(f"列表格式数据处理失败: {str(e)}")
        else:
            raise ValueError("不支持的数据格式")
        
        # Final validation: reject empty parse results.
        if len(x) == 0 or len(y) == 0:
            raise ValueError("解析后的数据为空")
            
        if len(x) != len(y):
            logger.warning(f"x和y长度不匹配: x={len(x)}, y={len(y)}，已自动调整为相同长度")
        
        logger.info(f"数据解析成功: x长度={len(x)}, y长度={len(y)}")
        return x, y
        
    except Exception as e:
        logger.error(f"数据解析错误: {str(e)}")
        raise ValueError(f"数据解析错误: {str(e)}")

# The old create_plot_base64 function was moved to the top of the file and improved.

def format_result(result: Dict[str, Any], include_raw: bool = False) -> Dict[str, Any]:
    """
    Format an analysis result for JSON output.

    Args:
        result: raw result dict (may contain NumPy arrays/scalars)
        include_raw: whether to attach fitted values, residuals and inputs

    Returns:
        A JSON-serializable summary dict with 'success', 'method',
        'statistics' and 'parameters' (plus optional 'raw_data' and
        'auto_selection').
    """
    def _plain(obj):
        # Recursively replace NumPy arrays/scalars with native Python types.
        if hasattr(obj, 'tolist'):
            return obj.tolist()
        if isinstance(obj, dict):
            return {key: _plain(val) for key, val in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [_plain(item) for item in obj]
        return obj

    # Normalize the whole result up front so every lookup below is plain data.
    plain = _plain(result)

    formatted = {
        'success': True,
        'method': plain.get('method', 'Unknown'),
        'statistics': {
            key: plain.get(key)
            for key in ('mse', 'rmse', 'r_squared', 'sample_size')
        },
        'parameters': plain.get('parameters', {}),
    }

    if include_raw:
        raw_keys = ('fitted_x', 'fitted_y', 'residuals',
                    'confidence_intervals', 'x', 'y')
        formatted['raw_data'] = {key: plain.get(key) for key in raw_keys}

    # Pass through auto-selection info when present.
    if 'auto_selection' in plain:
        formatted['auto_selection'] = plain['auto_selection']

    return formatted

@server.list_tools()
async def handle_list_tools() -> list[Tool]:
    """
    List all tools exposed by this MCP server.

    Returns:
        The static catalogue of Tool definitions — regression fitting,
        prediction, comparison/diagnostics, plotting, and automatic
        data-structure detection — each with its JSON input schema.
    """
    tools = [
        # --- Regression fitting tools ---
        Tool(
            name="loess_regression",
            description="执行LOESS局部回归分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "span": {
                        "type": "number",
                        "description": "平滑参数(0-1)，默认0.75",
                        "default": 0.75
                    },
                    "degree": {
                        "type": "integer",
                        "description": "多项式阶数(1或2)，默认2",
                        "default": 2
                    },
                    "robust": {
                        "type": "boolean",
                        "description": "是否使用鲁棒版本，默认true",
                        "default": True
                    },
                    "include_raw": {
                        "type": "boolean",
                        "description": "是否包含原始数据，默认false",
                        "default": False
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="kernel_regression",
            description="执行核回归(Nadaraya-Watson)分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "bandwidth": {
                        "type": "number",
                        "description": "带宽参数，默认自动选择"
                    },
                    "kernel": {
                        "type": "string",
                        "description": "核函数类型，默认'gaussian'",
                        "default": "gaussian"
                    },
                    "include_raw": {
                        "type": "boolean",
                        "description": "是否包含原始数据，默认false",
                        "default": False
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="spline_regression",
            description="执行样条回归分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "spline_type": {
                        "type": "string",
                        "description": "样条类型('linear', 'cubic')，默认'cubic'",
                        "default": "cubic"
                    },
                    "smoothing": {
                        "type": "number",
                        "description": "平滑参数"
                    },
                    "include_raw": {
                        "type": "boolean",
                        "description": "是否包含原始数据，默认false",
                        "default": False
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="local_polynomial_regression",
            description="执行局部多项式回归分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "degree": {
                        "type": "integer",
                        "description": "多项式阶数，默认1",
                        "default": 1
                    },
                    "bandwidth": {
                        "type": "number",
                        "description": "带宽参数，默认自动选择"
                    },
                    "include_raw": {
                        "type": "boolean",
                        "description": "是否包含原始数据，默认false",
                        "default": False
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="auto_select_regression",
            description="自动选择最佳回归方法",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "include_raw": {
                        "type": "boolean",
                        "description": "是否包含原始数据，默认false",
                        "default": False
                    }
                },
                "required": ["data"]
            }
        ),
        # --- Prediction, comparison and diagnostics tools ---
        Tool(
            name="predict_values",
            description="使用拟合模型预测新值",
            inputSchema={
                "type": "object",
                "properties": {
                    "model_id": {
                        "type": "string",
                        "description": "模型ID"
                    },
                    "x_new": {
                        "type": "array",
                        "description": "新的x值数组",
                        "items": {"type": "number"}
                    }
                },
                "required": ["model_id", "x_new"]
            }
        ),
        Tool(
            name="compare_methods",
            description="比较多种回归方法",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "methods": {
                        "type": "array",
                        "description": "要比较的方法列表，默认['loess', 'kernel', 'spline', 'local_poly']",
                        "items": {"type": "string"},
                        "default": ["loess", "kernel", "spline", "local_poly"]
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="regression_diagnostics",
            description="回归诊断分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "model_id": {
                        "type": "string",
                        "description": "模型ID"
                    }
                },
                "required": ["model_id"]
            }
        ),
        Tool(
            name="sample_size_analysis",
            description="样本量分析和方法推荐",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="method_recommendation",
            description="基于数据特征推荐回归方法",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    }
                },
                "required": ["data"]
            }
        ),
        # --- Plotting tools ---
        Tool(
            name="plot_regression",
            description="绘制回归拟合图",
            inputSchema={
                "type": "object",
                "properties": {
                    "model_id": {
                        "type": "string",
                        "description": "模型ID"
                    },
                    "title": {
                        "type": "string",
                        "description": "图表标题"
                    },
                    "x_label": {
                        "type": "string",
                        "description": "X轴标签，默认'X'",
                        "default": "X"
                    },
                    "y_label": {
                        "type": "string",
                        "description": "Y轴标签，默认'Y'",
                        "default": "Y"
                    },
                    "show_confidence": {
                        "type": "boolean",
                        "description": "是否显示置信区间，默认true",
                        "default": True
                    }
                },
                "required": ["model_id"]
            }
        ),
        Tool(
            name="plot_residuals",
            description="绘制残差图",
            inputSchema={
                "type": "object",
                "properties": {
                    "model_id": {
                        "type": "string",
                        "description": "模型ID"
                    }
                },
                "required": ["model_id"]
            }
        ),
        Tool(
            name="plot_comparison",
            description="绘制方法比较图",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "输入数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "methods": {
                        "type": "array",
                        "description": "要比较的方法列表",
                        "items": {"type": "string"},
                        "default": ["loess", "kernel", "spline"]
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="plot_diagnostics",
            description="绘制综合诊断图",
            inputSchema={
                "type": "object",
                "properties": {
                    "model_id": {
                        "type": "string",
                        "description": "模型ID"
                    }
                },
                "required": ["model_id"]
            }
        ),
        Tool(
            name="plot_complete_analysis",
            description="创建包含历史数据和预测数据的完整可视化图表",
            inputSchema={
                "type": "object",
                "properties": {
                    "historical_data": {
                        "type": ["string", "object"],
                        "description": "历史数据，格式：{'x': [x_values], 'y': [y_values]} 或 JSON字符串"
                    },
                    "prediction_years": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "需要预测的年份或x值数组"
                    },
                    "province_name": {
                        "type": "string",
                        "description": "省份名称，用于从矩阵数据中选择特定行进行分析（可选）"
                    },
                    "title": {
                        "type": "string",
                        "description": "图表标题",
                        "default": "历史数据与预测分析"
                    },
                    "x_label": {
                        "type": "string",
                        "description": "X轴标签",
                        "default": "年份"
                    },
                    "y_label": {
                        "type": "string",
                        "description": "Y轴标签",
                        "default": "数值"
                    },
                    "method": {
                        "type": "string",
                        "description": "回归方法，可选：loess, kernel, spline, local_poly, auto",
                        "default": "auto"
                    },
                    "show_confidence": {
                        "type": "boolean",
                        "description": "是否显示置信区间",
                        "default": True
                    },
                    "save_path": {
                        "type": "string",
                        "description": "图表保存路径（可选）"
                    }
                },
                "required": ["historical_data", "prediction_years"]
            }
        ),
        # --- Automatic data-structure detection and multi-sample tools ---
        Tool(
            name="detect_data_structure",
            description="自动检测数据结构：识别单样本或多样本数据",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object", "array"],
                        "description": "输入数据，支持多种格式：JSON字符串、字典、数组等"
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="auto_regression_analysis",
            description="自动进行回归分析：根据数据结构自动选择分析方法",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object", "array"],
                        "description": "输入数据，支持多种格式"
                    },
                    "method": {
                        "type": "string",
                        "description": "回归方法，可选：auto, loess, kernel, spline, local_poly",
                        "default": "auto"
                    },
                    "include_visualization": {
                        "type": "boolean",
                        "description": "是否包含可视化结果",
                        "default": True
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="multi_sample_visualization",
            description="多样本数据可视化：为多个样本创建对比图表",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["string", "object"],
                        "description": "多样本数据"
                    },
                    "method": {
                        "type": "string",
                        "description": "回归方法",
                        "default": "auto"
                    },
                    "title": {
                        "type": "string",
                        "description": "图表标题",
                        "default": "多样本非参数回归分析"
                    },
                    "save_path": {
                        "type": "string",
                        "description": "图表保存路径（可选）"
                    }
                },
                "required": ["data"]
            }
        )
    ]
    
    return tools

# Tool handler functions
async def handle_loess_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a LOESS regression request.

    Parses the input data, fits a LOESS model, registers the fitted model
    under a generated model id, and returns the formatted result as JSON.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        include_raw = arguments.get("include_raw", False)

        result = regression_analyzer.loess_regression(
            x, y,
            span=arguments.get("span", 0.75),
            degree=arguments.get("degree", 2),
            robust=arguments.get("robust", True),
        )

        # Register the fitted model so later tools (predict/plot) can reuse it;
        # the id is derived from how many LOESS models are already stored.
        existing = sum(1 for key in fitted_models if key.startswith('loess_'))
        model_id = f"loess_{existing}"
        result['x'] = x.tolist()
        result['y'] = y.tolist()

        def _jsonable(obj):
            # Recursively convert NumPy containers to plain Python types.
            if hasattr(obj, 'tolist'):
                return obj.tolist()
            if isinstance(obj, dict):
                return {k: _jsonable(v) for k, v in obj.items()}
            if isinstance(obj, (list, tuple)):
                return [_jsonable(item) for item in obj]
            return obj

        fitted_models[model_id] = _jsonable(result)

        formatted_result = format_result(result, include_raw)
        formatted_result["model_id"] = model_id

        payload = json.dumps(formatted_result, ensure_ascii=False, indent=2)
        return [TextContent(type="text", text=payload)]

    except Exception as e:
        raise ValueError(f"LOESS回归错误: {str(e)}")

async def handle_kernel_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a Nadaraya-Watson kernel regression request.

    Fits the model, stores it in the global model registry under a fresh
    sequential id, and returns the formatted fit summary as JSON text.

    Raises:
        ValueError: wrapping any parsing or fitting failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        include_raw = arguments.get("include_raw", False)

        result = regression_analyzer.kernel_regression(
            x,
            y,
            bandwidth=arguments.get("bandwidth"),
            kernel=arguments.get("kernel", "gaussian"),
        )

        # Next sequential id among previously stored kernel models.
        n_existing = sum(1 for name in fitted_models if name.startswith('kernel_'))
        model_id = f"kernel_{n_existing}"
        result['x'] = x.tolist()
        result['y'] = y.tolist()

        def _jsonable(value):
            # Convert numpy containers to JSON-friendly Python structures.
            if hasattr(value, 'tolist'):
                return value.tolist()
            if isinstance(value, dict):
                return {k: _jsonable(v) for k, v in value.items()}
            if isinstance(value, (list, tuple)):
                return [_jsonable(item) for item in value]
            return value

        fitted_models[model_id] = _jsonable(result)

        formatted = format_result(result, include_raw)
        formatted["model_id"] = model_id

        return [
            TextContent(
                type="text",
                text=json.dumps(formatted, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"核回归错误: {str(e)}")

async def handle_spline_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a spline regression request.

    Fits the spline, registers the model under a generated id, and
    returns the formatted summary as a JSON text payload.

    Raises:
        ValueError: wrapping any parsing or fitting failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        include_raw = arguments.get("include_raw", False)

        result = regression_analyzer.spline_regression(
            x,
            y,
            spline_type=arguments.get("spline_type", "cubic"),
            smoothing=arguments.get("smoothing"),
        )

        # Next free sequential id among stored spline models.
        index = sum(1 for key in fitted_models if key.startswith('spline_'))
        model_id = f"spline_{index}"
        result['x'] = x.tolist()
        result['y'] = y.tolist()

        def _plainify(node):
            # numpy arrays -> lists, recursively, so the store is JSON-safe.
            if hasattr(node, 'tolist'):
                return node.tolist()
            if isinstance(node, dict):
                return {key: _plainify(val) for key, val in node.items()}
            if isinstance(node, (list, tuple)):
                return [_plainify(item) for item in node]
            return node

        fitted_models[model_id] = _plainify(result)

        formatted = format_result(result, include_raw)
        formatted["model_id"] = model_id

        return [
            TextContent(
                type="text",
                text=json.dumps(formatted, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"样条回归错误: {str(e)}")

async def handle_local_polynomial_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a local polynomial regression request.

    Fits the model, stores it under a generated sequential id, and
    returns the formatted summary as JSON text.

    Raises:
        ValueError: wrapping any parsing or fitting failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        include_raw = arguments.get("include_raw", False)

        result = regression_analyzer.local_polynomial(
            x,
            y,
            degree=arguments.get("degree", 1),
            bandwidth=arguments.get("bandwidth"),
        )

        # Next sequential id among stored local-polynomial models.
        ordinal = sum(1 for key in fitted_models if key.startswith('local_poly_'))
        model_id = f"local_poly_{ordinal}"
        result['x'] = x.tolist()
        result['y'] = y.tolist()

        def _listify(node):
            # Recursively replace numpy arrays with plain lists for JSON.
            if hasattr(node, 'tolist'):
                return node.tolist()
            if isinstance(node, dict):
                return {key: _listify(val) for key, val in node.items()}
            if isinstance(node, (list, tuple)):
                return [_listify(item) for item in node]
            return node

        fitted_models[model_id] = _listify(result)

        formatted = format_result(result, include_raw)
        formatted["model_id"] = model_id

        return [
            TextContent(
                type="text",
                text=json.dumps(formatted, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"局部多项式回归错误: {str(e)}")

async def handle_auto_select_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Let the analyzer automatically pick the best regression method.

    Stores the winning fit in the model registry and returns the
    formatted summary as JSON text.

    Raises:
        ValueError: wrapping any parsing or fitting failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        include_raw = arguments.get("include_raw", False)

        result = regression_analyzer.auto_select_method(x, y)

        # Next sequential id among stored auto-selected models.
        count = sum(1 for key in fitted_models if key.startswith('auto_'))
        model_id = f"auto_{count}"
        result['x'] = x.tolist()
        result['y'] = y.tolist()

        def _serializable(node):
            # Recursively convert numpy arrays to JSON-compatible lists.
            if hasattr(node, 'tolist'):
                return node.tolist()
            if isinstance(node, dict):
                return {key: _serializable(val) for key, val in node.items()}
            if isinstance(node, (list, tuple)):
                return [_serializable(item) for item in node]
            return node

        fitted_models[model_id] = _serializable(result)

        formatted = format_result(result, include_raw)
        formatted["model_id"] = model_id

        return [
            TextContent(
                type="text",
                text=json.dumps(formatted, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"自动选择回归方法错误: {str(e)}")

async def handle_predict_values(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Predict responses at new x values using a previously fitted model.

    Looks the model up by id in the global registry and delegates the
    actual prediction to the analyzer.

    Raises:
        ValueError: if the model id is unknown or prediction fails.
    """
    try:
        model_id = arguments["model_id"]
        x_new = np.array(arguments["x_new"])

        # EAFP lookup: missing ids surface as the same wrapped ValueError.
        try:
            model_result = fitted_models[model_id]
        except KeyError:
            raise ValueError(f"模型ID {model_id} 不存在")

        y_pred = regression_analyzer.predict(model_result, x_new)

        payload = {
            "success": True,
            "model_id": model_id,
            "x_new": x_new.tolist(),
            "y_predicted": y_pred.tolist(),
            "method": model_result["method"],
        }
        return [
            TextContent(
                type="text",
                text=json.dumps(payload, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"预测错误: {str(e)}")

async def handle_compare_methods(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Fit several nonparametric regression methods on the same data and
    report per-method error metrics, the best method, and an MSE ranking.

    Methods that fail to fit are reported with an "error" entry instead
    of aborting the whole comparison.

    Raises:
        ValueError: wrapping any failure outside the per-method fits.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        methods = arguments.get("methods", ["loess", "kernel", "spline", "local_poly"])

        # Map each method key to the analyzer attribute implementing it;
        # attribute lookup stays inside the per-method try so a missing
        # routine is reported as that method's error.
        fitter_names = {
            "loess": "loess_regression",
            "kernel": "kernel_regression",
            "spline": "spline_regression",
            "local_poly": "local_polynomial",
        }

        results = {}
        for method in methods:
            attr = fitter_names.get(method)
            if attr is None:
                continue  # unknown method names are silently skipped
            try:
                outcome = getattr(regression_analyzer, attr)(x, y)
                results[method] = {
                    "method": outcome["method"],
                    "mse": outcome["mse"],
                    "rmse": outcome["rmse"],
                    "r_squared": outcome["r_squared"],
                }
            except Exception as e:
                results[method] = {"error": str(e)}

        # Rank only the methods that fitted successfully.
        valid = {name: info for name, info in results.items() if "error" not in info}
        best_method = min(valid, key=lambda name: valid[name]["mse"]) if valid else None

        report = {
            "success": True,
            "comparison_results": results,
            "best_method": best_method,
            "ranking": sorted(valid, key=lambda name: valid[name]["mse"]) if valid else [],
        }
        return [
            TextContent(
                type="text",
                text=json.dumps(report, ensure_ascii=False, indent=2),
            )
        ]

    except Exception as e:
        raise ValueError(f"方法比较错误: {str(e)}")

async def handle_regression_diagnostics(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Compute residual diagnostics for a previously fitted model.

    Reports residual summary statistics, a Shapiro-Wilk normality test,
    and a simple heteroscedasticity indicator (correlation between the
    fitted values and the absolute residuals).

    Raises:
        ValueError: if the model id is unknown or diagnostics fail.
    """
    try:
        model_id = arguments["model_id"]

        if model_id not in fitted_models:
            raise ValueError(f"模型ID {model_id} 不存在")

        model_result = fitted_models[model_id]
        residuals = np.array(model_result["residuals"])
        fitted_y = np.array(model_result["fitted_y"])

        # Run the Shapiro-Wilk test once and reuse both outputs
        # (previously the test was computed twice, once per field).
        # NOTE(review): scipy's shapiro requires at least 3 residuals —
        # smaller models will surface as the wrapped ValueError below.
        shapiro_stat, shapiro_p = stats.shapiro(residuals)

        diagnostics = {
            "residual_statistics": {
                "mean": float(np.mean(residuals)),
                "std": float(np.std(residuals)),
                "min": float(np.min(residuals)),
                "max": float(np.max(residuals)),
                "skewness": float(stats.skew(residuals)),
                "kurtosis": float(stats.kurtosis(residuals))
            },
            "normality_test": {
                "shapiro_wilk": {
                    "statistic": float(shapiro_stat),
                    "p_value": float(shapiro_p)
                }
            },
            "heteroscedasticity": {
                # |residual| vs fitted-value correlation; values near 0
                # are consistent with homoscedastic errors.
                "correlation_fitted_residuals": float(np.corrcoef(fitted_y, np.abs(residuals))[0, 1])
            }
        }

        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "model_id": model_id,
                    "method": model_result["method"],
                    "diagnostics": diagnostics
                }, ensure_ascii=False, indent=2)
            )
        ]

    except Exception as e:
        raise ValueError(f"回归诊断错误: {str(e)}")

async def handle_sample_size_analysis(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Analyze whether the sample size is adequate for nonparametric regression.

    Classifies the sample as small/medium/large, summarizes basic data
    characteristics, and rates each method's suitability against simple
    sample-size thresholds.

    Raises:
        ValueError: wrapping any parsing or computation failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        n = len(x)

        # Sample-size classification with matching textual recommendations.
        if n < 50:
            size_category = "小样本"
            recommendations = [
                "建议使用LOESS或LOWESS方法",
                "避免使用复杂的样条回归",
                "谨慎解释结果，可能存在较大不确定性"
            ]
        elif n < 200:
            size_category = "中等样本"
            recommendations = [
                "可以使用LOESS、核回归或样条回归",
                "建议比较多种方法的性能",
                "注意带宽/平滑参数的选择"
            ]
        else:
            size_category = "大样本"
            recommendations = [
                "所有非参数方法都适用",
                "样条回归可能表现最佳",
                "可以使用更复杂的模型"
            ]

        # Basic data characteristics.
        x_range = float(np.max(x) - np.min(x))
        # Use None instead of float('inf') when the mean is zero:
        # json.dumps would otherwise emit the non-standard token
        # "Infinity", which is not valid JSON and breaks strict parsers.
        y_mean = float(np.mean(y))
        y_cv = float(np.std(y) / y_mean) if y_mean != 0 else None

        analysis = {
            "sample_size": n,
            "size_category": size_category,
            "data_characteristics": {
                "x_range": x_range,
                "y_coefficient_of_variation": y_cv,
                "x_mean": float(np.mean(x)),
                "y_mean": y_mean,
                "x_std": float(np.std(x)),
                "y_std": float(np.std(y))
            },
            "recommendations": recommendations,
            # Rough per-method minimum-n heuristics.
            "method_suitability": {
                "loess": "适用" if n >= 10 else "不推荐",
                "kernel_regression": "适用" if n >= 30 else "谨慎使用",
                "spline_regression": "适用" if n >= 50 else "不推荐",
                "local_polynomial": "适用" if n >= 20 else "谨慎使用"
            }
        }

        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "analysis": analysis
                }, ensure_ascii=False, indent=2)
            )
        ]

    except Exception as e:
        raise ValueError(f"样本量分析错误: {str(e)}")

async def handle_method_recommendation(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Recommend regression methods based on sample size and a linear-trend check.

    Builds a priority-ordered recommendation list and echoes a small data
    summary. Non-finite summary values (CV with zero mean, correlation on
    constant data) are emitted as null so the payload stays valid JSON.

    Raises:
        ValueError: wrapping any parsing or computation failure.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        n = len(x)

        # Data characteristics; y_cv is None (not inf) when mean(y) == 0 so
        # json.dumps never emits the non-standard "Infinity" token.
        x_range = np.max(x) - np.min(x)
        y_mean = np.mean(y)
        y_cv = np.std(y) / y_mean if y_mean != 0 else None

        # Linear-trend indicator; may be NaN when x or y is constant.
        correlation = np.corrcoef(x, y)[0, 1]

        recommendations = []

        # NaN correlation fails the > 0.8 test, so constant data falls
        # through to the kernel-regression branch — same as before.
        if n < 50:
            recommendations.append({
                "method": "LOESS",
                "reason": "小样本量，LOESS能够提供稳定的局部拟合",
                "priority": 1,
                "parameters": {"span": 0.75, "robust": True}
            })
        elif n < 200:
            if abs(correlation) > 0.8:
                recommendations.append({
                    "method": "样条回归",
                    "reason": "中等样本量且数据呈现较强趋势，样条回归效果好",
                    "priority": 1,
                    "parameters": {"spline_type": "cubic"}
                })
            else:
                recommendations.append({
                    "method": "核回归",
                    "reason": "中等样本量且数据趋势不明显，核回归适应性强",
                    "priority": 1,
                    "parameters": {"kernel": "gaussian"}
                })
        else:
            recommendations.append({
                "method": "样条回归",
                "reason": "大样本量，样条回归能够捕捉复杂模式",
                "priority": 1,
                "parameters": {"spline_type": "cubic"}
            })
            recommendations.append({
                "method": "核回归",
                "reason": "大样本量的备选方案",
                "priority": 2,
                "parameters": {"kernel": "gaussian"}
            })

        # Always offer automatic selection as a fallback.
        recommendations.append({
            "method": "自动选择",
            "reason": "让算法自动选择最佳方法",
            "priority": 3,
            "parameters": {}
        })

        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "data_summary": {
                        "sample_size": n,
                        "x_range": float(x_range),
                        "y_coefficient_of_variation": float(y_cv) if y_cv is not None else None,
                        # NaN is not valid JSON either; report null instead.
                        "correlation": float(correlation) if np.isfinite(correlation) else None
                    },
                    "recommendations": recommendations
                }, ensure_ascii=False, indent=2)
            )
        ]

    except Exception as e:
        raise ValueError(f"方法推荐错误: {str(e)}")

async def handle_plot_regression(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Render a fitted-regression plot for a previously stored model.

    Expects "model_id" (required) plus optional "title", "x_label",
    "y_label", and "show_confidence" (default True). Returns a JSON
    payload containing the plot as a base64-encoded image and the
    model's R²/RMSE statistics.

    Raises:
        ValueError: if the model id is unknown or plotting fails.
    """
    try:
        model_id = arguments["model_id"]
        title = arguments.get("title")
        x_label = arguments.get("x_label", "X")
        y_label = arguments.get("y_label", "Y")
        show_confidence = arguments.get("show_confidence", True)
        
        if model_id not in fitted_models:
            raise ValueError(f"模型ID {model_id} 不存在")
        
        model_result = fitted_models[model_id]
        x = np.array(model_result['x'])
        y = np.array(model_result['y'])
        fitted_y = np.array(model_result["fitted_y"])
        method = model_result["method"]
        
        # Make sure CJK labels render correctly before drawing.
        ensure_chinese_display()
        
        # Create the figure.
        fig, ax = plt.subplots(figsize=(10, 6))
        
        # Scatter of the raw observations.
        ax.scatter(x, y, alpha=0.6, color='blue', s=50, label='原始数据')
        
        # Fitted curve, drawn in x-sorted order so the line is continuous.
        sort_idx = np.argsort(x)
        ax.plot(x[sort_idx], fitted_y[sort_idx], color='red', linewidth=2, label=f'{method}拟合')
        
        # Confidence band, if the model stored one and the caller wants it.
        if show_confidence and 'confidence_intervals' in model_result:
            ci = model_result['confidence_intervals']
            if isinstance(ci, dict) and 'lower' in ci and 'upper' in ci:
                ci_lower = np.array(ci['lower'])
                ci_upper = np.array(ci['upper'])
                ax.fill_between(x[sort_idx], ci_lower[sort_idx], ci_upper[sort_idx], 
                              alpha=0.3, color='red', label='95%置信区间')
        
        # Title and axis labels (default title derived from the method name).
        if title is None:
            title = f'{method}非参数回归拟合图'
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.set_xlabel(x_label, fontsize=12)
        ax.set_ylabel(y_label, fontsize=12)
        ax.legend()
        ax.grid(True, alpha=0.3)
        
        # Annotate fit statistics in the upper-left corner of the axes.
        r_squared = model_result.get('r_squared', 0)
        rmse = model_result.get('rmse', 0)
        stats_text = f'R² = {r_squared:.3f}\nRMSE = {rmse:.3f}'
        ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, 
               verticalalignment='top', bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.8))
        
        plt.tight_layout()
        
        # Encode the figure as base64 for transport in the JSON payload.
        image_base64 = create_plot_base64(fig)
        
        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "model_id": model_id,
                    "method": method,
                    "plot_type": "regression_fit",
                    "image_base64": image_base64,
                    "statistics": {
                        "r_squared": r_squared,
                        "rmse": rmse
                    }
                }, ensure_ascii=False, indent=2)
            )
        ]
        
    except Exception as e:
        raise ValueError(f"回归拟合图绘制错误: {str(e)}")

async def handle_plot_residuals(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Render a 2x2 residual-analysis panel for a stored model.

    Panels: fitted-vs-residuals scatter, normal Q-Q plot, residual
    histogram with a scaled normal overlay, and a residual sequence
    plot. Returns the figure base64-encoded plus residual statistics.

    Raises:
        ValueError: if the model id is unknown or plotting fails.
    """
    try:
        model_id = arguments["model_id"]
        
        if model_id not in fitted_models:
            raise ValueError(f"模型ID {model_id} 不存在")
        
        model_result = fitted_models[model_id]
        fitted_y = np.array(model_result["fitted_y"])
        residuals = np.array(model_result["residuals"])
        method = model_result["method"]
        
        # Make sure CJK labels render correctly before drawing.
        ensure_chinese_display()
        
        # Create the 2x2 panel of subplots.
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 12))
        
        # 1. Fitted values vs residuals, with a zero reference line.
        ax1.scatter(fitted_y, residuals, alpha=0.6, color='blue', s=50)
        ax1.axhline(y=0, color='red', linestyle='--', alpha=0.8)
        ax1.set_xlabel('拟合值', fontsize=12)
        ax1.set_ylabel('残差', fontsize=12)
        ax1.set_title(f'{method} - 拟合值vs残差图', fontsize=14, fontweight='bold')
        ax1.grid(True, alpha=0.3)
        
        # 2. Normal Q-Q plot of the residuals.
        stats.probplot(residuals, dist="norm", plot=ax2)
        ax2.set_title(f'{method} - 残差Q-Q图', fontsize=14, fontweight='bold')
        ax2.grid(True, alpha=0.3)
        
        # 3. Residual histogram.
        ax3.hist(residuals, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        ax3.axvline(x=0, color='red', linestyle='--', alpha=0.8)
        ax3.set_xlabel('残差', fontsize=12)
        ax3.set_ylabel('频数', fontsize=12)
        ax3.set_title(f'{method} - 残差分布直方图', fontsize=14, fontweight='bold')
        ax3.grid(True, alpha=0.3)
        
        # Overlay a normal density curve, rescaled from density to counts
        # to match the un-normalized histogram (bin width = range / 20).
        x_norm = np.linspace(residuals.min(), residuals.max(), 100)
        y_norm = stats.norm.pdf(x_norm, np.mean(residuals), np.std(residuals))
        y_norm = y_norm * len(residuals) * (residuals.max() - residuals.min()) / 20
        ax3.plot(x_norm, y_norm, 'r-', linewidth=2, label='正态分布拟合')
        ax3.legend()
        
        # 4. Residuals in observation order (sequence plot).
        ax4.plot(range(len(residuals)), residuals, 'o-', alpha=0.7, color='green')
        ax4.axhline(y=0, color='red', linestyle='--', alpha=0.8)
        ax4.set_xlabel('观测序号', fontsize=12)
        ax4.set_ylabel('残差', fontsize=12)
        ax4.set_title(f'{method} - 残差序列图', fontsize=14, fontweight='bold')
        ax4.grid(True, alpha=0.3)
        
        plt.tight_layout()
        
        # Encode the figure as base64 for transport in the JSON payload.
        image_base64 = create_plot_base64(fig)
        
        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "model_id": model_id,
                    "method": method,
                    "plot_type": "residuals_analysis",
                    "image_base64": image_base64,
                    "residual_statistics": {
                        "mean": float(np.mean(residuals)),
                        "std": float(np.std(residuals)),
                        "min": float(np.min(residuals)),
                        "max": float(np.max(residuals))
                    }
                }, ensure_ascii=False, indent=2)
            )
        ]
        
    except Exception as e:
        raise ValueError(f"残差图绘制错误: {str(e)}")

async def handle_plot_comparison(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Plot a side-by-side comparison of several regression methods.

    Left panel: raw data with each method's fitted curve. Right panel:
    R² bars (left axis) and MSE line (right twin axis) per method.
    Methods that fail to fit are logged and skipped.

    Raises:
        ValueError: wrapping any failure outside the per-method fits.
    """
    try:
        x, y = parse_data_input(arguments["data"])
        methods = arguments.get("methods", ["loess", "kernel", "spline"])
        
        # Make sure CJK labels render correctly before drawing.
        ensure_chinese_display()
        
        # Create the two-panel figure.
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
        
        # Left panel: raw data on top (zorder=5) with fitted curves below.
        ax1.scatter(x, y, alpha=0.6, color='black', s=50, label='原始数据', zorder=5)
        
        colors = ['red', 'blue', 'green', 'orange', 'purple']
        comparison_results = {}
        
        for i, method in enumerate(methods):
            try:
                if method == "loess":
                    result = regression_analyzer.loess_regression(x, y)
                elif method == "kernel":
                    result = regression_analyzer.kernel_regression(x, y)
                elif method == "spline":
                    result = regression_analyzer.spline_regression(x, y)
                elif method == "local_poly":
                    result = regression_analyzer.local_polynomial(x, y)
                else:
                    continue
                
                fitted_y = np.array(result['fitted_y'])
                
                comparison_results[method] = {
                    "mse": result["mse"],
                    "rmse": result["rmse"],
                    "r_squared": result["r_squared"]
                }
                
                # Fitted curve, drawn in x-sorted order so the line is continuous.
                sorted_indices = np.argsort(x)
                ax1.plot(x[sorted_indices], fitted_y[sorted_indices], 
                        color=colors[i % len(colors)], linewidth=2, 
                        label=f'{method.upper()} (R²={result["r_squared"]:.3f})', alpha=0.8)
                
            except Exception as e:
                logger.warning(f"方法 {method} 拟合失败: {str(e)}")
                continue
        
        ax1.set_xlabel('X', fontsize=12)
        ax1.set_ylabel('Y', fontsize=12)
        ax1.set_title('回归方法比较 - 拟合曲线', fontsize=14, fontweight='bold')
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        
        # Right panel: per-method performance metrics (only if any fit succeeded).
        if comparison_results:
            methods_list = list(comparison_results.keys())
            r2_values = [comparison_results[m]["r_squared"] for m in methods_list]
            mse_values = [comparison_results[m]["mse"] for m in methods_list]
            
            # Twin y-axis: R² on the left, MSE on the right.
            ax2_twin = ax2.twinx()
            
            # R² bar chart (return value intentionally unused).
            ax2.bar([m.upper() for m in methods_list], r2_values, 
                    alpha=0.7, color='skyblue', label='R²')
            ax2.set_ylabel('R² 值', fontsize=12, color='blue')
            ax2.tick_params(axis='y', labelcolor='blue')
            ax2.set_ylim(0, 1)
            
            # MSE line plot on the twin axis (return value intentionally unused).
            ax2_twin.plot([m.upper() for m in methods_list], mse_values, 
                          'ro-', linewidth=2, markersize=8, label='MSE')
            ax2_twin.set_ylabel('MSE 值', fontsize=12, color='red')
            ax2_twin.tick_params(axis='y', labelcolor='red')
            
            # Numeric labels above each bar / marker.
            for i, (r2, mse) in enumerate(zip(r2_values, mse_values)):
                ax2.text(i, r2 + 0.02, f'{r2:.3f}', ha='center', va='bottom', fontweight='bold')
                ax2_twin.text(i, mse, f'{mse:.2e}', ha='center', va='bottom', 
                            bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
            
            ax2.set_title('回归方法比较 - 性能指标', fontsize=14, fontweight='bold')
            ax2.grid(True, alpha=0.3)
        
        plt.tight_layout()
        
        # Encode the figure as base64 for transport in the JSON payload.
        image_base64 = create_plot_base64(fig)
        
        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "methods_compared": methods,
                    "comparison_results": comparison_results,
                    "best_method": min(comparison_results.keys(), 
                                      key=lambda k: comparison_results[k]["mse"]) if comparison_results else None,
                    "plot_type": "method_comparison",
                    "image_base64": image_base64
                }, ensure_ascii=False, indent=2)
            )
        ]
        
    except Exception as e:
        raise ValueError(f"方法比较图绘制错误: {str(e)}")

async def handle_plot_diagnostics(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Render a comprehensive 2x2 diagnostic panel for a stored model.

    Panels: fit overlay with R²/RMSE annotation, residuals-vs-fitted,
    normal Q-Q plot, and a density-normalized residual histogram with a
    normal overlay. Also reports Jarque-Bera and Shapiro-Wilk normality
    tests and a coarse model-quality rating.

    Raises:
        ValueError: if the model id is unknown or plotting fails.
    """
    try:
        model_id = arguments["model_id"]
        
        if model_id not in fitted_models:
            raise ValueError(f"模型ID {model_id} 不存在")
        
        model_result = fitted_models[model_id]
        x = np.array(model_result["x"])
        y = np.array(model_result["y"])
        fitted_y = np.array(model_result["fitted_y"])
        residuals = np.array(model_result["residuals"])
        method = model_result["method"]
        
        # Make sure CJK labels render correctly before drawing — every
        # other plot handler does this; it was missing here.
        ensure_chinese_display()
        
        # Create the 2x2 panel of subplots.
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16, 12))
        
        # 1. Fit overlay: raw data plus the fitted curve in x-sorted order.
        sorted_indices = np.argsort(x)
        ax1.scatter(x, y, alpha=0.6, color='blue', s=50, label='原始数据')
        ax1.plot(x[sorted_indices], fitted_y[sorted_indices], 'r-', linewidth=2, label='拟合曲线')
        ax1.set_xlabel('X', fontsize=12)
        ax1.set_ylabel('Y', fontsize=12)
        ax1.set_title(f'{method} - 拟合结果', fontsize=14, fontweight='bold')
        ax1.legend()
        ax1.grid(True, alpha=0.3)
        
        # Annotate R² and RMSE in the upper-left corner.
        r2 = model_result.get('r_squared', 0)
        rmse = model_result.get('rmse', 0)
        ax1.text(0.05, 0.95, f'R² = {r2:.4f}\nRMSE = {rmse:.4f}', 
                transform=ax1.transAxes, verticalalignment='top',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
        
        # 2. Residuals vs fitted values, with a zero reference line.
        ax2.scatter(fitted_y, residuals, alpha=0.6, color='green', s=50)
        ax2.axhline(y=0, color='red', linestyle='--', alpha=0.8)
        ax2.set_xlabel('拟合值', fontsize=12)
        ax2.set_ylabel('残差', fontsize=12)
        ax2.set_title('残差 vs 拟合值', fontsize=14, fontweight='bold')
        ax2.grid(True, alpha=0.3)
        
        # 3. Normal Q-Q plot of the residuals.
        stats.probplot(residuals, dist="norm", plot=ax3)
        ax3.set_title('残差正态Q-Q图', fontsize=14, fontweight='bold')
        ax3.grid(True, alpha=0.3)
        
        # 4. Residual histogram (density-normalized).
        ax4.hist(residuals, bins=20, alpha=0.7, color='orange', edgecolor='black', density=True)
        ax4.axvline(x=0, color='red', linestyle='--', alpha=0.8)
        
        # Overlay the matching normal density curve.
        x_norm = np.linspace(residuals.min(), residuals.max(), 100)
        y_norm = stats.norm.pdf(x_norm, np.mean(residuals), np.std(residuals))
        ax4.plot(x_norm, y_norm, 'r-', linewidth=2, label='正态分布')
        
        ax4.set_xlabel('残差', fontsize=12)
        ax4.set_ylabel('密度', fontsize=12)
        ax4.set_title('残差分布', fontsize=14, fontweight='bold')
        ax4.legend()
        ax4.grid(True, alpha=0.3)
        
        plt.tight_layout()
        
        # Normality diagnostics — use the module-level scipy.stats import
        # instead of the redundant function-scope import used previously.
        jb_stat, jb_pvalue = stats.jarque_bera(residuals)
        sw_stat, sw_pvalue = stats.shapiro(residuals)
        
        # Residual summary statistics.
        residual_stats = {
            "mean": float(np.mean(residuals)),
            "std": float(np.std(residuals)),
            "min": float(np.min(residuals)),
            "max": float(np.max(residuals)),
            "skewness": float(stats.skew(residuals)),
            "kurtosis": float(stats.kurtosis(residuals))
        }
        
        # Normality test results at the conventional 5% level.
        normality_tests = {
            "jarque_bera": {
                "statistic": float(jb_stat),
                "p_value": float(jb_pvalue),
                "is_normal": jb_pvalue > 0.05
            },
            "shapiro_wilk": {
                "statistic": float(sw_stat),
                "p_value": float(sw_pvalue),
                "is_normal": sw_pvalue > 0.05
            }
        }
        
        # Encode the figure as base64 for transport in the JSON payload.
        image_base64 = create_plot_base64(fig)
        
        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "model_id": model_id,
                    "method": method,
                    "plot_type": "comprehensive_diagnostics",
                    "image_base64": image_base64,
                    "model_performance": {
                        "r_squared": float(r2),
                        "rmse": float(rmse),
                        "mse": float(rmse**2)
                    },
                    "residual_statistics": residual_stats,
                    "normality_tests": normality_tests,
                    "diagnostic_summary": {
                        "residuals_normal": all([test["is_normal"] for test in normality_tests.values()]),
                        "model_quality": "excellent" if r2 > 0.9 else "good" if r2 > 0.7 else "fair" if r2 > 0.5 else "poor"
                    }
                }, ensure_ascii=False, indent=2)
            )
        ]
        
    except Exception as e:
        raise ValueError(f"综合诊断图绘制错误: {str(e)}")

async def handle_detect_data_structure(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Inspect the submitted data and report its detected structure.

    Unlike most handlers, failures here are reported inside the JSON
    payload (success=False with an empty structure skeleton) instead of
    being raised to the caller.
    """
    try:
        data = arguments["data"]
        logger.info(f"接收到数据结构检测请求，数据类型: {type(data)}")
        logger.info(f"数据内容: {str(data)[:200]}...")
        
        # Delegate to the module-level structure detector.
        structure_info = detect_data_structure(data)
        logger.info(f"数据结构检测完成: {structure_info}")
        
        def _delistify(node):
            # Recursively replace numpy arrays with plain lists for JSON.
            if hasattr(node, 'tolist'):
                return node.tolist()
            if isinstance(node, dict):
                return {key: _delistify(val) for key, val in node.items()}
            if isinstance(node, (list, tuple)):
                return [_delistify(item) for item in node]
            return node
        
        body = {
            "success": True,
            "data_structure": _delistify(structure_info)
        }
        return [
            TextContent(
                type="text",
                text=json.dumps(body, ensure_ascii=False, indent=2)
            )
        ]
        
    except Exception as e:
        logger.error(f"数据结构检测错误: {str(e)}")
        fallback = {
            "success": False,
            "error": str(e),
            "data_structure": {
                "data_type": None,
                "sample_count": 0,
                "structure": None,
                "recommendations": [],
                "parsed_data": None
            }
        }
        return [
            TextContent(
                type="text",
                text=json.dumps(fallback, ensure_ascii=False, indent=2)
            )
        ]

async def handle_auto_regression_analysis(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle an automatic regression analysis request.

    Delegates to ``auto_regression_analysis(data, method)`` and returns its
    result as a JSON text payload.

    Raises:
        ValueError: wrapping any underlying failure, with the original
        exception chained as the cause.
    """
    try:
        data = arguments["data"]
        method = arguments.get("method", "auto")
        # NOTE(review): the tool schema also accepts "include_visualization",
        # but this handler never uses it (plots come from the dedicated
        # plot_* tools), so it is intentionally not read here.

        analysis_result = auto_regression_analysis(data, method)

        # Recursively convert NumPy arrays/scalars to JSON-safe values.
        def _to_plain(value):
            if hasattr(value, 'tolist'):
                return value.tolist()
            if isinstance(value, dict):
                return {k: _to_plain(v) for k, v in value.items()}
            if isinstance(value, (list, tuple)):
                return [_to_plain(v) for v in value]
            return value

        return [
            TextContent(
                type="text",
                text=json.dumps(
                    {"success": True, "analysis_result": _to_plain(analysis_result)},
                    ensure_ascii=False,
                    indent=2,
                ),
            )
        ]

    except Exception as e:
        # Chain the original exception so the root cause is not lost.
        raise ValueError(f"自动回归分析错误: {str(e)}") from e

async def handle_multi_sample_visualization(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a multi-sample visualization request.

    Fits each sample with the requested regression method (or auto-selects
    one per sample), draws scatter + fitted curve for up to four samples on
    a 2x2 grid, and returns a JSON payload with the base64-encoded figure
    and per-sample metrics.

    Raises:
        ValueError: wrapping any underlying failure, with the original
        exception chained as the cause.
    """
    try:
        data = arguments["data"]
        method = arguments.get("method", "auto")
        title = arguments.get("title", "多样本非参数回归分析")
        save_path = arguments.get("save_path")

        # Accept either a JSON string or an already-parsed mapping.
        if isinstance(data, str):
            data = json.loads(data)

        if not isinstance(data, dict):
            raise ValueError("多样本数据应为字典格式，键为样本名称，值为数据")

        # Make sure CJK labels render correctly on the Agg backend.
        ensure_chinese_display()

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle(title, fontsize=16, fontweight='bold')

        colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown']
        results = {}

        for i, (sample_name, sample_data) in enumerate(data.items()):
            if i >= 4:  # the 2x2 grid shows at most four samples
                break

            ax = axes[i // 2, i % 2]

            # A bare list is treated as a series indexed 0..n-1; anything
            # else is handed to the generic parser.
            if isinstance(sample_data, list):
                x = np.arange(len(sample_data))
                y = np.array(sample_data)
            else:
                x, y = parse_data_input(sample_data)

            if method == "auto":
                result = regression_analyzer.auto_select_method(x, y)
                selected_method = result['method']
            else:
                if method == "loess":
                    result = regression_analyzer.loess_regression(x, y)
                elif method == "kernel":
                    result = regression_analyzer.kernel_regression(x, y)
                elif method == "spline":
                    result = regression_analyzer.spline_regression(x, y)
                elif method == "local_poly":
                    result = regression_analyzer.local_polynomial_regression(x, y)
                else:
                    raise ValueError(f"不支持的回归方法: {method}")
                selected_method = method

            # Cast metrics to built-in float: json.dumps cannot serialize
            # NumPy scalar types (np.float64 etc.).
            results[sample_name] = {
                "method": selected_method,
                "r_squared": float(result.get('r_squared', 0)),
                "rmse": float(result.get('rmse', 0))
            }

            color = colors[i % len(colors)]
            ax.scatter(x, y, alpha=0.6, color=color, s=50, label='数据点')

            fitted_x = np.array(result['fitted_x'])
            fitted_y = np.array(result['fitted_y'])
            sort_idx = np.argsort(fitted_x)
            ax.plot(fitted_x[sort_idx], fitted_y[sort_idx], color=color, linewidth=2, 
                   label=f'{selected_method}拟合')

            ax.set_title(f'{sample_name} (R²={result.get("r_squared", 0):.3f})', fontsize=12)
            ax.set_xlabel('X')
            ax.set_ylabel('Y')
            ax.legend()
            ax.grid(True, alpha=0.3)

        # Hide any unused cells of the 2x2 grid.
        for i in range(len(data), 4):
            axes[i // 2, i % 2].set_visible(False)

        plt.tight_layout()

        # Saving is best-effort: a failed save must not abort the response.
        if save_path:
            try:
                plt.savefig(save_path, dpi=300, bbox_inches='tight')
                logger.info(f"多样本图表已保存到: {save_path}")
            except Exception as e:
                logger.warning(f"保存多样本图表失败: {str(e)}")

        image_base64 = create_plot_base64(fig)

        # float() again for the same JSON-serializability reason; guard the
        # mean against an empty results dict (np.mean([]) is NaN + warning).
        summary = {
            "total_samples": len(data),
            "displayed_samples": min(len(data), 4),
            "average_r_squared": float(np.mean([r["r_squared"] for r in results.values()])) if results else None,
            "best_sample": max(results.keys(), key=lambda k: results[k]["r_squared"]) if results else None
        }

        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": True,
                    "plot_type": "multi_sample_analysis",
                    "image_base64": image_base64,
                    "sample_results": results,
                    "summary": summary
                }, ensure_ascii=False, indent=2)
            )
        ]

    except Exception as e:
        # Chain the original exception so the root cause is not lost.
        raise ValueError(f"多样本可视化错误: {str(e)}") from e

async def handle_plot_complete_analysis(arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Handle a complete-analysis visualization: fit the historical data with a
    nonparametric regression, interpolate/extrapolate predictions for the
    requested years, and render both on a single annotated chart.

    Arguments (from the tool call):
        historical_data: data accepted by ``parse_data_input``
        prediction_years: x values (years) to predict; must be non-empty
        province_name, title, x_label, y_label, method, show_confidence,
        save_path: optional settings

    Returns a single TextContent whose text is a JSON report with the
    base64-encoded PNG, model performance, and a prediction summary.

    Raises:
        ValueError: wrapping any underlying failure, with the original
        exception chained as the cause.
    """
    try:
        province_name = arguments.get("province_name")  # optional region filter
        x, y = parse_data_input(arguments["historical_data"], province_name)
        prediction_years = np.array(arguments["prediction_years"])
        title = arguments.get("title", "历史数据与预测分析")
        x_label = arguments.get("x_label", "年份")
        y_label = arguments.get("y_label", "数值")
        method = arguments.get("method", "auto")
        show_confidence = arguments.get("show_confidence", True)
        save_path = arguments.get("save_path")

        # An empty prediction set would break the plotting and the summary
        # (predictions[0], min(predictions)); fail early with a clear message.
        if prediction_years.size == 0:
            raise ValueError("prediction_years 不能为空")

        # Fit with the requested method (or auto-select the best one).
        if method == "auto":
            result = regression_analyzer.auto_select_method(x, y)
        elif method == "loess":
            result = regression_analyzer.loess_regression(x, y)
        elif method == "kernel":
            result = regression_analyzer.kernel_regression(x, y)
        elif method == "spline":
            result = regression_analyzer.spline_regression(x, y)
        elif method == "local_poly":
            result = regression_analyzer.local_polynomial_regression(x, y)
        else:
            raise ValueError(f"不支持的回归方法: {method}")

        fitted_model = result['fitted_model']
        if method == "auto":
            method = result['method']  # report the method actually chosen

        predictions = []
        prediction_confidence_intervals = {'lower': [], 'upper': []}

        if 'fitted_x' in fitted_model and 'fitted_y' in fitted_model:
            fitted_x = np.array(fitted_model['fitted_x'])
            fitted_y = np.array(fitted_model['fitted_y'])

            # Prediction uncertainty: prefer the model RMSE. The residual-std
            # fallback is computed lazily — fitted_y may live on a grid whose
            # length differs from y, in which case y - fitted_y would raise.
            residual_std = result.get('rmse')
            if residual_std is None:
                if len(fitted_y) == len(y):
                    residual_std = float(np.std(y - fitted_y))
                else:
                    residual_std = float(np.std(y))

            from scipy.interpolate import interp1d

            x_max = fitted_x.max()
            beyond = prediction_years[prediction_years > x_max]

            # Years beyond the fitted range: linear extrapolation from the
            # last points, with a confidence band that widens with distance.
            if len(beyond) > 0:
                last_points = 3
                if len(fitted_x) >= last_points:
                    slope = (fitted_y[-1] - fitted_y[-last_points]) / (fitted_x[-1] - fitted_x[-last_points])
                else:
                    # Too few points for a local slope; use the global one.
                    slope = (fitted_y[-1] - fitted_y[0]) / (fitted_x[-1] - fitted_x[0])
                for pred_x in beyond:
                    pred_y = fitted_y[-1] + slope * (pred_x - fitted_x[-1])
                    predictions.append(pred_y)
                    # Farther from the data => larger uncertainty.
                    extrapolation_factor = 1 + 0.1 * (pred_x - x_max)
                    pred_std = residual_std * extrapolation_factor
                    prediction_confidence_intervals['lower'].append(pred_y - 1.96 * pred_std)
                    prediction_confidence_intervals['upper'].append(pred_y + 1.96 * pred_std)

            # Years inside the fitted range: interpolate on the fitted curve.
            pred_in_range = prediction_years[prediction_years <= x_max]
            if len(pred_in_range) > 0:
                # Cubic interpolation needs at least 4 points; degrade to
                # linear gracefully instead of crashing on short series.
                interp_kind = 'cubic' if len(fitted_x) >= 4 else 'linear'
                interp_func = interp1d(fitted_x, fitted_y, kind=interp_kind, fill_value='extrapolate')
                pred_in_range_values = interp_func(pred_in_range)

                # Merge in-range and out-of-range predictions back into the
                # original order of prediction_years.
                all_predictions = []
                all_lower_ci = []
                all_upper_ci = []
                in_range_idx = 0
                out_range_idx = 0
                for pred_x in prediction_years:
                    if pred_x <= x_max:
                        pred_y = pred_in_range_values[in_range_idx]
                        all_predictions.append(pred_y)
                        # In-range CI: constant band from the residual std.
                        all_lower_ci.append(pred_y - 1.96 * residual_std)
                        all_upper_ci.append(pred_y + 1.96 * residual_std)
                        in_range_idx += 1
                    else:
                        all_predictions.append(predictions[out_range_idx])
                        all_lower_ci.append(prediction_confidence_intervals['lower'][out_range_idx])
                        all_upper_ci.append(prediction_confidence_intervals['upper'][out_range_idx])
                        out_range_idx += 1
                predictions = all_predictions
                prediction_confidence_intervals = {'lower': all_lower_ci, 'upper': all_upper_ci}
            # else: every requested year is beyond the range — the
            # extrapolation results above are already in order.

        # ---- Visualization ----
        ensure_chinese_display()

        fig, ax = plt.subplots(figsize=(14, 8))

        # Historical observations.
        ax.scatter(x, y, alpha=0.7, color='blue', s=60, label='历史数据', zorder=3)

        # Fitted curve over the historical range.
        fitted_x = np.array(result['fitted_x'])
        fitted_y = np.array(result['fitted_y'])
        sort_idx = np.argsort(fitted_x)
        ax.plot(fitted_x[sort_idx], fitted_y[sort_idx], color='red', linewidth=2.5, 
               label=f'{method}拟合曲线', zorder=2)

        # Predicted points.
        ax.scatter(prediction_years, predictions, alpha=0.8, color='green', s=80, 
                  marker='^', label='预测数据', zorder=3)

        # Dashed link from the last fitted point to the first prediction.
        if len(fitted_x) > 0 and len(prediction_years) > 0:
            last_hist_x = fitted_x[sort_idx][-1]
            last_hist_y = fitted_y[sort_idx][-1]
            first_pred_x = prediction_years[0]
            first_pred_y = predictions[0]

            ax.plot([last_hist_x, first_pred_x], [last_hist_y, first_pred_y], 
                   color='orange', linewidth=2, linestyle='--', alpha=0.7, label='预测连接线')

        # Dashed trend line through the predictions themselves.
        if len(prediction_years) > 1:
            pred_sort_idx = np.argsort(prediction_years)
            ax.plot(prediction_years[pred_sort_idx], np.array(predictions)[pred_sort_idx], 
                   color='green', linewidth=2, linestyle='--', alpha=0.8, label='预测趋势线')

        # 95% confidence band around the predictions, if requested.
        if show_confidence and len(prediction_confidence_intervals['lower']) > 0:
            pred_lower = np.array(prediction_confidence_intervals['lower'])
            pred_upper = np.array(prediction_confidence_intervals['upper'])
            pred_sort_idx = np.argsort(prediction_years)
            ax.fill_between(prediction_years[pred_sort_idx], pred_lower[pred_sort_idx], pred_upper[pred_sort_idx], 
                          alpha=0.3, color='green', label='预测95%置信区间')

        # Vertical divider between the historical and predicted regions.
        if len(fitted_x) > 0 and len(prediction_years) > 0:
            split_x = fitted_x.max()
            ax.axvline(x=split_x, color='gray', linestyle=':', alpha=0.6, linewidth=1.5)

            y_range = ax.get_ylim()
            ax.text(split_x - (split_x - fitted_x.min()) * 0.1, y_range[1] * 0.95, 
                   '历史数据', ha='center', va='top', fontsize=10, 
                   bbox=dict(boxstyle='round,pad=0.3', facecolor='lightblue', alpha=0.7))
            ax.text(split_x + (prediction_years.max() - split_x) * 0.1, y_range[1] * 0.95, 
                   '预测数据', ha='center', va='top', fontsize=10,
                   bbox=dict(boxstyle='round,pad=0.3', facecolor='lightgreen', alpha=0.7))

        ax.set_title(title, fontsize=16, fontweight='bold', pad=20)
        ax.set_xlabel(x_label, fontsize=13)
        ax.set_ylabel(y_label, fontsize=13)
        ax.legend(loc='best', fontsize=11)
        ax.grid(True, alpha=0.3)

        # Fit-quality annotation (top-left).
        r_squared = result.get('r_squared', 0)
        rmse = result.get('rmse', 0)
        stats_text = f'回归方法: {method}\nR² = {r_squared:.4f}\nRMSE = {rmse:.4f}'
        ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, 
               verticalalignment='top', fontsize=10,
               bbox=dict(boxstyle='round,pad=0.5', facecolor='lightyellow', alpha=0.8))

        # Prediction annotation (bottom-right), at most 5 values shown.
        pred_text = "预测结果:\n"
        for i, (year, pred) in enumerate(zip(prediction_years, predictions)):
            pred_text += f"{year}: {pred:.1f}\n"
            if i >= 4:
                pred_text += "..."
                break

        ax.text(0.98, 0.02, pred_text, transform=ax.transAxes, 
               verticalalignment='bottom', horizontalalignment='right', fontsize=10,
               bbox=dict(boxstyle='round,pad=0.5', facecolor='lightcyan', alpha=0.8))

        plt.tight_layout()

        # Saving is best-effort: a failed save must not abort the response.
        if save_path:
            try:
                plt.savefig(save_path, dpi=300, bbox_inches='tight')
                logger.info(f"图表已保存到: {save_path}")
            except Exception as e:
                logger.warning(f"保存图表失败: {str(e)}")

        image_base64 = create_plot_base64(fig)

        # float()/tolist() casts keep everything JSON-serializable.
        analysis_result = {
            "success": True,
            "method": method,
            "plot_type": "complete_analysis",
            "image_base64": image_base64,
            "historical_data_summary": {
                "sample_size": len(x),
                "x_range": [float(x.min()), float(x.max())],
                "y_range": [float(y.min()), float(y.max())],
                "y_mean": float(y.mean()),
                "y_std": float(y.std())
            },
            "model_performance": {
                "r_squared": float(r_squared),
                "rmse": float(rmse),
                "mse": float(rmse**2)
            },
            "predictions": {
                "years": prediction_years.tolist(),
                "values": [float(p) for p in predictions],
                "prediction_summary": {
                    "min_predicted": float(min(predictions)),
                    "max_predicted": float(max(predictions)),
                    "mean_predicted": float(np.mean(predictions)),
                    "trend": "increasing" if predictions[-1] > predictions[0] else "decreasing" if predictions[-1] < predictions[0] else "stable"
                }
            }
        }

        if save_path:
            analysis_result["saved_path"] = save_path

        return [
            TextContent(
                type="text",
                text=json.dumps(analysis_result, ensure_ascii=False, indent=2)
            )
        ]

    except Exception as e:
        # Chain the original exception so the root cause is not lost.
        raise ValueError(f"完整分析可视化错误: {str(e)}") from e

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> list[TextContent]:
    """
    Dispatch an incoming tool call to the matching handler.

    Unknown tool names and any handler failure are converted into a
    ``success: False`` JSON payload (with a traceback) rather than letting
    the exception propagate to the transport layer.
    """
    # Tool-name -> coroutine-handler dispatch table.
    dispatch = {
        "loess_regression": handle_loess_regression,
        "kernel_regression": handle_kernel_regression,
        "spline_regression": handle_spline_regression,
        "local_polynomial_regression": handle_local_polynomial_regression,
        "auto_select_regression": handle_auto_select_regression,
        "predict_values": handle_predict_values,
        "compare_methods": handle_compare_methods,
        "regression_diagnostics": handle_regression_diagnostics,
        "sample_size_analysis": handle_sample_size_analysis,
        "method_recommendation": handle_method_recommendation,
        "plot_regression": handle_plot_regression,
        "plot_residuals": handle_plot_residuals,
        "plot_comparison": handle_plot_comparison,
        "plot_diagnostics": handle_plot_diagnostics,
        "plot_complete_analysis": handle_plot_complete_analysis,
        "detect_data_structure": handle_detect_data_structure,
        "auto_regression_analysis": handle_auto_regression_analysis,
        "multi_sample_visualization": handle_multi_sample_visualization,
    }

    try:
        logger.info(f"调用工具: {name}")

        handler = dispatch.get(name)
        if handler is None:
            raise ValueError(f"未知工具: {name}")

        return await handler(arguments or {})

    except Exception as e:
        logger.error(f"工具调用错误: {str(e)}")
        logger.error(traceback.format_exc())
        return [
            TextContent(
                type="text",
                text=json.dumps({
                    "success": False,
                    "error": str(e),
                    "traceback": traceback.format_exc()
                }, ensure_ascii=False, indent=2)
            )
        ]

async def main():
    """Run the nonparametric-regression MCP server over stdio until EOF."""
    async with stdio_server() as (read_stream, write_stream):
        init_options = InitializationOptions(
            server_name="nonparametric-regression",
            server_version="1.0.0",
            capabilities=server.get_capabilities(
                notification_options=NotificationOptions(),
                experimental_capabilities={},
            ),
        )
        await server.run(read_stream, write_stream, init_options)

# Script entry point: start the async MCP server event loop.
if __name__ == "__main__":
    asyncio.run(main())