#!/usr/bin/env python3
"""
小样本两样本位置检验 MCP 服务器
专注于小样本情况下的非参数统计检验方法
使用stdio通讯方式
"""

import asyncio
import json
import logging
import sys
from typing import Any, Dict, List, Optional, Union

import numpy as np
from scipy import stats
from scipy.stats import mannwhitneyu, fisher_exact, ks_2samp, ranksums, chi2_contingency
from statsmodels.stats.contingency_tables import mcnemar
from sklearn.metrics import cohen_kappa_score
import warnings

# MCP imports
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
    Resource,
    Tool,
    TextContent,
    ImageContent,
    EmbeddedResource,
    LoggingLevel
)

def ensure_json_serializable(obj):
    """Recursively convert *obj* into something json.dumps can handle.

    Dicts, lists and tuples are walked recursively; numpy arrays become
    lists; numpy scalars become the matching Python scalar. Anything else
    (str, None, plain Python objects) is returned unchanged.

    Bug fix: ``bool`` must be tested BEFORE ``int`` — Python ``bool`` is a
    subclass of ``int``, so the original ``(np.integer, int)`` branch turned
    ``True``/``False`` into ``1``/``0`` in the JSON output and the later
    ``bool`` branch was unreachable.
    """
    if isinstance(obj, dict):
        return {key: ensure_json_serializable(value) for key, value in obj.items()}
    if isinstance(obj, tuple):
        return tuple(ensure_json_serializable(item) for item in obj)
    if isinstance(obj, list):
        return [ensure_json_serializable(item) for item in obj]
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, (bool, np.bool_)):
        # Checked before the int branch on purpose — see docstring.
        return bool(obj)
    if isinstance(obj, (np.integer, int)):
        return int(obj)
    if isinstance(obj, (np.floating, float)):
        return float(obj)
    if hasattr(obj, 'item'):  # any remaining numpy scalar type
        return obj.item()
    # str, None and everything else pass through untouched.
    return obj

# Logging configuration.
# IMPORTANT: an MCP stdio server speaks JSON-RPC over stdout, so log records
# must go to stderr ONLY. The original configuration attached a stdout
# handler (which corrupts the protocol framing) plus a stderr handler
# (which duplicated every record).
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stderr)
    ]
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
warnings.filterwarnings('ignore')

# Startup marker so stderr shows the process came up.
logger.info("=== MCP服务器启动 ===")

server = Server("two-samples-exact-test")

class TwoSampleTestAnalyzer:
    """Small-sample two-sample location-test analyzer.

    Every method is a ``@staticmethod`` that takes plain Python lists and
    returns a JSON-serializable dict (numpy scalars stripped via
    ``ensure_json_serializable``). Failures are reported through an
    ``'error'`` key in the returned dict rather than raised, and the test
    methods refuse inputs whose combined sample size exceeds 30, pointing
    the caller at the large-sample approximation service instead.
    """
    
    @staticmethod
    def _calculate_hodges_lehmann_ci(sample1: List[float], sample2: List[float], confidence_level: float = 0.95) -> Dict[str, float]:
        """
        Hodges-Lehmann confidence interval for the location shift between
        two independent samples.

        The point estimate is the median of all n1*n2 pairwise differences
        x - y; the interval endpoints are picked out of the sorted
        differences using a normal approximation to the Mann-Whitney U
        variance.

        Returns a dict with 'lower', 'upper' and 'median_difference'
        (all NaN plus an 'error' key if anything goes wrong, e.g. empty
        samples).
        """
        try:
            # Enumerate all pairwise (Walsh-type) differences x - y.
            differences = []
            for x in sample1:
                for y in sample2:
                    differences.append(x - y)
            
            differences = np.array(differences)
            differences.sort()
            
            n = len(differences)
            alpha = 1 - confidence_level
            
            # Two-sided critical value from the standard normal
            # approximation.
            from scipy.stats import norm
            z_alpha_2 = norm.ppf(1 - alpha/2)
            
            # Variance of the Mann-Whitney U statistic under H0.
            n1, n2 = len(sample1), len(sample2)
            variance = n1 * n2 * (n1 + n2 + 1) / 12
            
            # Approximate rank positions of the interval endpoints inside
            # the sorted differences; clamped to valid indices below.
            center = n // 2
            margin = int(z_alpha_2 * np.sqrt(variance))
            
            lower_idx = max(0, center - margin)
            upper_idx = min(n - 1, center + margin)
            
            return {
                'lower': float(differences[lower_idx]),
                'upper': float(differences[upper_idx]),
                'median_difference': float(np.median(differences))
            }
        except Exception as e:
            # Degrade gracefully — callers embed this dict in their result.
            return {
                'lower': float('nan'),
                'upper': float('nan'),
                'median_difference': float('nan'),
                'error': str(e)
            }
    
    @staticmethod
    def check_sample_size(sample1: List[float], sample2: List[float]) -> Dict[str, Any]:
        """
        Classify the combined sample size and recommend a test strategy.

        Returns the individual/total sizes, a category ('large' for n > 30,
        'very_small' for n < 10, otherwise 'small'), a human-readable
        recommendation string and the MCP service best suited to that size.
        """
        n1, n2 = len(sample1), len(sample2)
        total_n = n1 + n2
        
        if total_n > 30:
            recommendation = "样本量较大，建议使用大样本近似检验服务"
            category = "large"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-approximation-test"
        elif total_n < 10:
            recommendation = "样本量很小，建议使用精确检验方法如Fisher精确检验或置换检验"
            category = "very_small"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-exact-test"
        else:
            recommendation = "样本量适中，适合使用非参数精确检验方法"
            category = "small"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-exact-test"
            
        result = {
            'sample1_size': n1,
            'sample2_size': n2,
            'total_size': total_n,
            'size_category': category,
            'recommendation': recommendation,
            'recommended_service': service_recommendation,
            'is_large_sample': total_n > 30
        }
        return ensure_json_serializable(result)
    
    @staticmethod
    def mann_whitney_u_test(sample1: List[float], sample2: List[float], alternative: str = 'two-sided', confidence_level: float = 0.95) -> Dict[str, Any]:
        """
        Mann-Whitney U test (small samples, combined n <= 30).

        Returns the U statistic, p-value, effect size r, a Hodges-Lehmann
        confidence interval for the location shift, and a formatted
        interpretation string. Rejects inputs with n > 30 by returning an
        'error' dict pointing at the large-sample service.
        """
        try:
            # Sample sizes.
            n1, n2 = len(sample1), len(sample2)
            total_n = n1 + n2
            
            # Size guard: this exact-test service only handles n <= 30.
            if total_n > 30:
                result = {
                    'error': f'样本量过大(n={total_n})，超出小样本精确检验范围(≤30)，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test',
                    'method': 'Mann-Whitney U Test (Exact)',
                    'sample_size_limit': 30
                }
                return ensure_json_serializable(result)
            
            # Run the test (scipy picks an exact p-value for small samples
            # without ties by default).
            statistic, p_value = mannwhitneyu(sample1, sample2, alternative=alternative)
            
            # Effect size r = |Z| / sqrt(N), with Z from the normal
            # approximation of U (no continuity correction here).
            mean_u = n1 * n2 / 2
            std_u = np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12)
            z_score = (statistic - mean_u) / std_u
            effect_size_r = abs(z_score) / np.sqrt(total_n)
            
            # Hodges-Lehmann CI for the location shift.
            confidence_interval = TwoSampleTestAnalyzer._calculate_hodges_lehmann_ci(
                sample1, sample2, confidence_level
            )
            
            # NOTE(review): the decision uses a fixed alpha of 0.05 even when
            # confidence_level != 0.95 — confirm whether that is intended.
            reject_null = p_value < 0.05
            
            result = {
                'u_statistic': statistic,
                'p_value': p_value,
                'reject_null': reject_null,
                'effect_size_r': effect_size_r,
                'confidence_interval': confidence_interval,
                'confidence_level': confidence_level,
                'alternative': alternative,
                'method': 'Mann-Whitney U Test (Exact)',
                'interpretation': f"U统计量为{statistic:.4f}，p值为{p_value:.4f}，效应量r为{effect_size_r:.4f}，{'拒绝' if reject_null else '不拒绝'}原假设。位置参数差的{confidence_level*100}%置信区间为[{confidence_interval['lower']:.4f}, {confidence_interval['upper']:.4f}]"
            }
            return ensure_json_serializable(result)
        except Exception as e:
            result = {
                'error': str(e),
                'method': 'Mann-Whitney U Test (Exact)'
            }
            return ensure_json_serializable(result)
    
    @staticmethod
    def fisher_exact_test(table: List[List[int]], confidence_level: float = 0.95) -> Dict[str, Any]:
        """
        Fisher's exact test on a 2x2 contingency table (total n <= 30).

        Returns the odds ratio, p-value and a Wald-type confidence interval
        for the odds ratio computed on the log scale. The CI is skipped
        (with an explanatory 'note') when a cell is zero or the odds ratio
        is 0/inf.
        """
        try:
            from scipy.stats import norm
            
            if len(table) != 2 or len(table[0]) != 2:
                return ensure_json_serializable({
                    'error': 'Fisher精确检验需要2x2列联表',
                    'method': 'Fisher Exact Test'
                })
            
            # Size guard: this exact-test service only handles n <= 30.
            total_n = sum(sum(row) for row in table)
            if total_n > 30:
                return ensure_json_serializable({
                    'error': f'样本量过大(n={total_n})，超出小样本精确检验范围(≤30)，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test',
                    'method': 'Fisher Exact Test',
                    'sample_size_limit': 30
                })
                
            odds_ratio, p_value = fisher_exact(table)
            
            # Cells of the 2x2 table: [[a, b], [c, d]].
            a, b = table[0]
            c, d = table[1]
            
            # Wald CI for the odds ratio on the log scale.
            if odds_ratio > 0 and odds_ratio != float('inf'):
                log_or = np.log(odds_ratio)
                se_log_or = np.sqrt(1/a + 1/b + 1/c + 1/d) if all(x > 0 for x in [a, b, c, d]) else None
                
                if se_log_or is not None:
                    alpha = 1 - confidence_level
                    z_critical = norm.ppf(1 - alpha/2)
                    
                    log_ci_lower = log_or - z_critical * se_log_or
                    log_ci_upper = log_or + z_critical * se_log_or
                    
                    confidence_interval = {
                        'lower': float(np.exp(log_ci_lower)),
                        'upper': float(np.exp(log_ci_upper)),
                        'odds_ratio': float(odds_ratio)
                    }
                else:
                    # A zero cell makes the standard error undefined.
                    confidence_interval = {
                        'lower': None,
                        'upper': None,
                        'odds_ratio': float(odds_ratio),
                        'note': '存在零计数，无法计算置信区间'
                    }
            else:
                # Degenerate odds ratio (0 or inf) — no CI possible.
                confidence_interval = {
                    'lower': None,
                    'upper': None,
                    'odds_ratio': float(odds_ratio),
                    'note': '比值比为无穷大或零，无法计算置信区间'
                }
            
            result = {
                'odds_ratio': odds_ratio,
                'p_value': p_value,
                'reject_null': p_value < 0.05,
                'confidence_interval': confidence_interval,
                'confidence_level': confidence_level,
                'method': 'Fisher Exact Test',
                'interpretation': f"比值比为{odds_ratio:.4f}，p值为{p_value:.4f}，{'拒绝' if p_value < 0.05 else '不拒绝'}原假设。比值比的{confidence_level*100}%置信区间为[{confidence_interval.get('lower', 'N/A')}, {confidence_interval.get('upper', 'N/A')}]"
            }
            
            return ensure_json_serializable(result)
            
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Fisher Exact Test'
            })
    
    @staticmethod
    def kolmogorov_smirnov_test(sample1: List[float], sample2: List[float]) -> Dict[str, Any]:
        """
        Two-sample Kolmogorov-Smirnov test (combined n <= 30).

        Tests whether the two samples come from the same distribution;
        returns the KS statistic, p-value and an interpretation string.
        """
        try:
            # Size guard: this exact-test service only handles n <= 30.
            n1, n2 = len(sample1), len(sample2)
            total_n = n1 + n2
            
            if total_n > 30:
                return ensure_json_serializable({
                    'error': f'样本量过大(n={total_n})，超出小样本精确检验范围(≤30)，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test',
                    'method': 'Kolmogorov-Smirnov Test',
                    'sample_size_limit': 30
                })
            
            statistic, p_value = ks_2samp(sample1, sample2)
            
            result = {
                'ks_statistic': statistic,
                'p_value': p_value,
                'reject_null': p_value < 0.05,
                'method': 'Kolmogorov-Smirnov Test',
                'interpretation': f"KS统计量为{statistic:.4f}，p值为{p_value:.4f}，{'拒绝' if p_value < 0.05 else '不拒绝'}原假设"
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Kolmogorov-Smirnov Test'
            })
    
    @staticmethod
    def wilcoxon_rank_sum_test(sample1: List[float], sample2: List[float], confidence_level: float = 0.95) -> Dict[str, Any]:
        """
        Wilcoxon rank-sum test (combined n <= 30).

        Returns the Z statistic, p-value, effect size r = |Z| / sqrt(N),
        a Hodges-Lehmann confidence interval and an interpretation string.

        NOTE(review): scipy.stats.ranksums computes the p-value from a
        normal approximation, not an exact distribution, so the "(Exact)"
        label in the reported method name overstates it — confirm intent.
        """
        try:
            # Sample sizes.
            n1, n2 = len(sample1), len(sample2)
            total_n = n1 + n2
            
            # Size guard: this exact-test service only handles n <= 30.
            if total_n > 30:
                result = {
                    'error': f'样本量过大(n={total_n})，超出小样本精确检验范围(≤30)，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test',
                    'method': 'Wilcoxon Rank Sum Test (Exact)',
                    'sample_size_limit': 30
                }
                return ensure_json_serializable(result)
            
            # Run the rank-sum test (normal approximation — see docstring).
            statistic, p_value = ranksums(sample1, sample2)
            
            # Effect size r = |Z| / sqrt(N).
            effect_size_r = abs(statistic) / np.sqrt(total_n)
            
            # Hodges-Lehmann CI for the location shift.
            confidence_interval = TwoSampleTestAnalyzer._calculate_hodges_lehmann_ci(
                sample1, sample2, confidence_level
            )
            
            # Fixed alpha of 0.05 regardless of confidence_level.
            reject_null = p_value < 0.05
            
            result = {
                'z_statistic': statistic,
                'p_value': p_value,
                'reject_null': reject_null,
                'effect_size_r': effect_size_r,
                'confidence_interval': confidence_interval,
                'confidence_level': confidence_level,
                'method': 'Wilcoxon Rank Sum Test (Exact)',
                'interpretation': f"Z统计量为{statistic:.4f}，p值为{p_value:.4f}，效应量r为{effect_size_r:.4f}，{'拒绝' if reject_null else '不拒绝'}原假设。位置参数差的{confidence_level*100}%置信区间为[{confidence_interval['lower']:.4f}, {confidence_interval['upper']:.4f}]"
            }
            return ensure_json_serializable(result)
        except Exception as e:
            result = {
                'error': str(e),
                'method': 'Wilcoxon Rank Sum Test (Exact)'
            }
            return ensure_json_serializable(result)
    
    @staticmethod
    def mcnemar_test(table: List[List[int]]) -> Dict[str, Any]:
        """
        McNemar test for paired categorical data (2x2 table, total n <= 30).

        Uses statsmodels' exact (binomial) variant. Returns the statistic,
        p-value and an interpretation string.
        """
        try:
            if len(table) != 2 or len(table[0]) != 2:
                return ensure_json_serializable({
                    'error': 'McNemar检验需要2x2列联表',
                    'method': 'McNemar Test'
                })
            
            # Size guard: this exact-test service only handles n <= 30.
            total_n = sum(sum(row) for row in table)
            if total_n > 30:
                return ensure_json_serializable({
                    'error': f'样本量过大(n={total_n})，超出小样本精确检验范围(≤30)，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test',
                    'method': 'McNemar Test',
                    'sample_size_limit': 30
                })
            
            result = mcnemar(table, exact=True)
            
            mcnemar_result = {
                'statistic': result.statistic,
                'p_value': result.pvalue,
                'reject_null': result.pvalue < 0.05,
                'method': 'McNemar Test',
                'interpretation': f"统计量为{result.statistic:.4f}，p值为{result.pvalue:.4f}，{'拒绝' if result.pvalue < 0.05 else '不拒绝'}原假设"
            }
            return ensure_json_serializable(mcnemar_result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'McNemar Test'
            })
    
    @staticmethod
    def cohen_kappa_coefficient(y_true: List[int], y_pred: List[int]) -> Dict[str, Any]:
        """
        Cohen's kappa for inter-rater agreement between two label arrays.

        Returns the kappa score plus a qualitative strength label using
        the conventional 0.2/0.4/0.6/0.8 cut points.
        """
        try:
            kappa = cohen_kappa_score(y_true, y_pred)
            
            # Map kappa to the conventional agreement-strength bands.
            if kappa < 0:
                strength = "差于随机"
            elif kappa < 0.2:
                strength = "轻微一致"
            elif kappa < 0.4:
                strength = "一般一致"
            elif kappa < 0.6:
                strength = "中等一致"
            elif kappa < 0.8:
                strength = "高度一致"
            else:
                strength = "几乎完全一致"
            
            result = {
                'kappa_score': kappa,
                'consistency_strength': strength,
                'method': 'Cohen Kappa Coefficient',
                'interpretation': f"Kappa系数为{kappa:.4f}，一致性程度为{strength}"
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Cohen Kappa Coefficient'
            })
    
    @staticmethod
    def is_categorical(data: List[Any]) -> Dict[str, Any]:
        """
        Heuristically decide whether *data* looks categorical.

        Flags the data as categorical when it has <= 10 unique values, a
        uniqueness ratio below 50%, or consists entirely of str/bool
        values. Empty input raises ZeroDivisionError internally and is
        reported via the 'error' branch.
        """
        try:
            unique_values = list(set(data))
            n_unique = len(unique_values)
            n_total = len(data)
            
            # Heuristic rules for "categorical".
            is_cat = (
                n_unique <= 10 or  # at most 10 distinct values
                n_unique / n_total < 0.5 or  # under 50% distinct
                all(isinstance(x, (str, bool)) for x in data)  # all str/bool
            )
            
            result = {
                'is_categorical': is_cat,
                'unique_values': unique_values,
                'n_unique': n_unique,
                'n_total': n_total,
                'uniqueness_ratio': n_unique / n_total
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'is_categorical': False
            })
    
    @staticmethod
    def auto_select_method(sample1: List[float], sample2: List[float], paired: bool = False) -> Dict[str, Any]:
        """
        Pick a suitable small-sample exact test from the data's features.

        Considers combined size (redirects to the large-sample service for
        n > 30), whether either sample looks categorical, and the *paired*
        flag. Returns a 'selected_method' name plus a 'reason' string.

        NOTE(review): the paired/continuous branch recommends
        'wilcoxon-signed-rank-test', which this server does not expose as a
        tool — confirm whether that method exists elsewhere.
        """
        try:
            analyzer = TwoSampleTestAnalyzer()
            
            # Combined sample size.
            size_info = analyzer.check_sample_size(sample1, sample2)
            total_n = size_info['total_size']
            
            # This service only handles n <= 30.
            if total_n > 30:
                result = {
                    'selected_method': 'use-large-sample-service',
                    'reason': f'样本量较大(n={total_n})，请使用大样本近似检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-approximation-test'
                }
                return ensure_json_serializable(result)
            
            # Data-type detection on each sample.
            cat_info1 = analyzer.is_categorical(sample1)
            cat_info2 = analyzer.is_categorical(sample2)
            
            is_categorical = bool(cat_info1['is_categorical'] or cat_info2['is_categorical'])
            
            # Choose the exact test from pairing and data type.
            if paired:
                if is_categorical:
                    result = {
                        'selected_method': 'mcnemar-test',
                        'reason': f'配对分类数据(n={total_n})，推荐McNemar精确检验'
                    }
                else:
                    result = {
                        'selected_method': 'wilcoxon-signed-rank-test',
                        'reason': f'配对连续数据(n={total_n})，推荐Wilcoxon符号秩精确检验'
                    }
            else:
                if is_categorical:
                    result = {
                        'selected_method': 'fisher-exact-test',
                        'reason': f'独立分类数据(n={total_n})，推荐Fisher精确检验'
                    }
                else:
                    # Both size branches currently recommend the same test;
                    # kept separate to preserve the distinct reason strings.
                    if total_n < 10:
                        result = {
                            'selected_method': 'mann-whitney-u-test',
                            'reason': f'极小样本连续数据(n={total_n})，推荐Mann-Whitney U精确检验'
                        }
                    else:
                        result = {
                            'selected_method': 'mann-whitney-u-test',
                            'reason': f'小样本连续数据(n={total_n})，推荐Mann-Whitney U精确检验'
                        }
            
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'selected_method': 'unknown'
            })

@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """Advertise the statistical-test tools exposed by this server.

    Each Tool pairs a name and description with a JSON schema for its
    arguments; the actual execution happens in handle_call_tool. Shared
    schema fragments are built by small local helpers to avoid repeating
    the same literals nine times.
    """

    def number_array(description: str) -> Dict[str, Any]:
        # Schema fragment: array of numbers.
        return {"type": "array", "items": {"type": "number"}, "description": description}

    def integer_array(description: str) -> Dict[str, Any]:
        # Schema fragment: array of integers.
        return {"type": "array", "items": {"type": "integer"}, "description": description}

    def contingency_table(description: str) -> Dict[str, Any]:
        # Schema fragment: list of integer rows (a contingency table).
        return {
            "type": "array",
            "items": {"type": "array", "items": {"type": "integer"}},
            "description": description,
        }

    def sample_pair() -> Dict[str, Any]:
        # Fresh copies each call so no schema shares mutable state.
        return {
            "sample1": number_array("第一个样本数组"),
            "sample2": number_array("第二个样本数组"),
        }

    def confidence_level() -> Dict[str, Any]:
        return {"type": "number", "description": "置信水平（默认0.95）", "default": 0.95}

    def obj(properties: Dict[str, Any], required: List[str]) -> Dict[str, Any]:
        return {"type": "object", "properties": properties, "required": required}

    two_samples = ["sample1", "sample2"]

    return [
        Tool(
            name="auto_select_method",
            description="根据输入数据特征自动选择合适的非参数统计方法，考虑样本大小、数据类型和是否配对",
            inputSchema=obj(
                {
                    **sample_pair(),
                    "paired": {
                        "type": "boolean",
                        "description": "是否为配对数据（默认false）",
                        "default": False,
                    },
                },
                two_samples,
            ),
        ),
        Tool(
            name="check_sample_size",
            description="检查样本大小并给出统计方法建议",
            inputSchema=obj(sample_pair(), two_samples),
        ),
        Tool(
            name="mann_whitney_u_test",
            description="Mann-Whitney U检验，适合小样本连续数据的位置检验",
            inputSchema=obj(
                {
                    **sample_pair(),
                    "alternative": {
                        "type": "string",
                        "description": "备择假设类型（'two-sided', 'less', 'greater'）",
                        "default": "two-sided",
                    },
                    "confidence_level": confidence_level(),
                },
                two_samples,
            ),
        ),
        Tool(
            name="fisher_exact_test",
            description="Fisher精确检验，适合小样本分类数据的关联性检验",
            inputSchema=obj(
                {
                    "table": contingency_table("2x2列联表，格式为[[a, b], [c, d]]"),
                    "confidence_level": confidence_level(),
                },
                ["table"],
            ),
        ),
        Tool(
            name="kolmogorov_smirnov_test",
            description="Kolmogorov-Smirnov双样本检验，检验两个样本是否来自同一分布",
            inputSchema=obj(sample_pair(), two_samples),
        ),
        Tool(
            name="wilcoxon_rank_sum_test",
            description="Wilcoxon秩和检验，适合两样本位置检验",
            inputSchema=obj(
                {**sample_pair(), "confidence_level": confidence_level()},
                two_samples,
            ),
        ),
        Tool(
            name="mcnemar_test",
            description="McNemar检验，适合配对样本的分类数据检验",
            inputSchema=obj(
                {"table": contingency_table("配对样本的列联表")},
                ["table"],
            ),
        ),
        Tool(
            name="cohen_kappa_coefficient",
            description="Cohen's Kappa系数计算，用于评估评分者间一致性",
            inputSchema=obj(
                {
                    "y_true": integer_array("真实标签数组"),
                    "y_pred": integer_array("预测标签数组"),
                },
                ["y_true", "y_pred"],
            ),
        ),
        Tool(
            name="is_categorical",
            description="判断数据是否为分类数据",
            inputSchema=obj(
                {
                    "data": {
                        "type": "array",
                        "items": {},
                        "description": "待判断的数据数组",
                    }
                },
                ["data"],
            ),
        ),
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: dict) -> List[TextContent]:
    """Dispatch an MCP tool call to the matching analyzer method.

    The analyzer's result dict (or a structured error dict on failure) is
    serialized to JSON and returned as a single TextContent item.
    Per-method sample-size limits are enforced inside
    TwoSampleTestAnalyzer itself.

    Fixes over the original:
    - removed the large block of commented-out dead code;
    - removed `print(...)` calls in the error path — stdout carries the
      JSON-RPC stream in a stdio MCP server, so printing there corrupts
      the protocol;
    - the last-resort fallback payload is now built with json.dumps; the
      previous hand-built f-string produced invalid JSON whenever the
      error text contained quotes, backslashes or newlines.
    """
    try:
        logger.info(f"开始处理工具调用: {name}")
        logger.info(f"参数: {arguments}")

        analyzer = TwoSampleTestAnalyzer()

        # Dispatch table: tool name -> zero-argument callable returning the
        # result dict. Replaces the original if/elif chain.
        dispatch = {
            "auto_select_method": lambda: analyzer.auto_select_method(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
                paired=arguments.get("paired", False),
            ),
            "check_sample_size": lambda: analyzer.check_sample_size(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
            ),
            "mann_whitney_u_test": lambda: analyzer.mann_whitney_u_test(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
                alternative=arguments.get("alternative", "two-sided"),
                confidence_level=arguments.get("confidence_level", 0.95),
            ),
            "fisher_exact_test": lambda: analyzer.fisher_exact_test(
                table=arguments["table"],
                confidence_level=arguments.get("confidence_level", 0.95),
            ),
            "kolmogorov_smirnov_test": lambda: analyzer.kolmogorov_smirnov_test(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
            ),
            "wilcoxon_rank_sum_test": lambda: analyzer.wilcoxon_rank_sum_test(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
                confidence_level=arguments.get("confidence_level", 0.95),
            ),
            "mcnemar_test": lambda: analyzer.mcnemar_test(
                table=arguments["table"],
            ),
            "cohen_kappa_coefficient": lambda: analyzer.cohen_kappa_coefficient(
                y_true=arguments["y_true"],
                y_pred=arguments["y_pred"],
            ),
            "is_categorical": lambda: analyzer.is_categorical(
                data=arguments["data"],
            ),
        }

        if name not in dispatch:
            raise ValueError(f"未知的工具名称: {name}")
        result = dispatch[name]()

        # Strip numpy types, then serialize.
        result = ensure_json_serializable(result)
        json_str = json.dumps(result, ensure_ascii=False, indent=2)
        return [TextContent(type="text", text=json_str)]

    except Exception as e:
        import traceback
        full_traceback = traceback.format_exc()
        logger.error(f"工具调用失败: {str(e)}")
        logger.error(f"完整错误堆栈: {full_traceback}")

        error_result = ensure_json_serializable({
            "error": str(e),
            "tool": name,
            "traceback": full_traceback,
        })
        try:
            json_str = json.dumps(error_result, ensure_ascii=False, indent=2)
        except Exception as json_error:
            logger.error(f"JSON序列化错误结果失败: {str(json_error)}")
            # Built with json.dumps so the payload stays valid JSON no matter
            # what characters the error messages contain.
            json_str = json.dumps(
                {
                    "error": f"序列化失败: {str(json_error)}",
                    "original_error": str(e),
                    "tool": name,
                },
                ensure_ascii=False,
            )
        return [TextContent(type="text", text=json_str)]

if __name__ == "__main__":
    from mcp.server.stdio import stdio_server

    async def _serve() -> None:
        """Run the MCP server over stdio until the stream closes."""
        async with stdio_server() as (in_stream, out_stream):
            init_options = InitializationOptions(
                server_name="two-sample-exact-test",
                server_version="1.0.0",
                capabilities=server.get_capabilities(
                    notification_options=NotificationOptions(),
                    experimental_capabilities={},
                ),
            )
            await server.run(in_stream, out_stream, init_options)

    # asyncio is already imported at module level.
    asyncio.run(_serve())