#!/usr/bin/env python3
"""
大样本两样本非参数检验 MCP 服务器
专注于大样本情况下的非参数统计检验方法
使用stdio通讯方式
"""

import asyncio
import json
import logging
import sys
from typing import Any, Dict, List, Optional, Union

import numpy as np
from scipy import stats
from scipy.stats import mannwhitneyu, ks_2samp, ranksums, normaltest, shapiro
import warnings

# MCP imports
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
    Resource,
    Tool,
    TextContent,
    ImageContent,
    EmbeddedResource,
    LoggingLevel
)

def ensure_json_serializable(obj):
    """Recursively convert *obj* into JSON-serializable builtins.

    Handles nested dicts/lists/tuples and converts numpy arrays and numpy
    scalar types to their Python equivalents.  Unknown objects are returned
    unchanged.

    Args:
        obj: Any value, possibly containing numpy types.

    Returns:
        A structure of plain Python builtins mirroring *obj* (tuples are
        preserved as tuples; ``json.dumps`` serializes them as arrays).
    """
    if isinstance(obj, dict):
        return {key: ensure_json_serializable(value) for key, value in obj.items()}
    elif isinstance(obj, list):
        return [ensure_json_serializable(item) for item in obj]
    elif isinstance(obj, tuple):
        return tuple(ensure_json_serializable(item) for item in obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, (bool, np.bool_)):
        # BUG FIX: bool must be tested BEFORE int.  Python bool is a subclass
        # of int, so the original's (np.integer, int) branch converted native
        # True/False to 1/0 and its later bool branch was unreachable.
        return bool(obj)
    elif isinstance(obj, (np.integer, int)):
        return int(obj)
    elif isinstance(obj, (np.floating, float)):
        return float(obj)
    elif hasattr(obj, 'item'):
        # Any remaining numpy scalar type: unwrap to a Python scalar.
        return obj.item()
    else:
        # None, str and anything else pass through unchanged.
        return obj

class NumpyEncoder(json.JSONEncoder):
    """Custom JSON encoder that handles numpy types (kept for backward compat).

    ``default()`` is only invoked for objects the base encoder cannot
    serialize, so it only needs to cover numpy arrays/scalars.  Anything
    else is delegated to ``super().default()``, which raises ``TypeError``.
    (The original returned unconvertible objects unchanged, which made the
    encoder call ``default`` on the same object again, recursing forever.)
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if hasattr(obj, 'item'):
            # Other numpy scalar types: unwrap to a Python scalar.
            return obj.item()
        # Proper failure mode for genuinely unserializable objects.
        return super().default(obj)

# Logging configuration.
# IMPORTANT: this server speaks the MCP JSON-RPC protocol over stdio, so
# stdout is reserved for protocol messages.  All logging must go to stderr —
# the original also attached a stdout handler, which injected log lines into
# the protocol stream and duplicated every record.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stderr)
    ]
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Suppress scipy/numpy runtime warnings in tool output.
warnings.filterwarnings('ignore')

# Startup marker in the log.
logger.info("=== 大样本两样本非参数检验MCP服务器启动 ===")

server = Server("two-samples-nonparametric-test")

class TwoSampleNonparametricTestAnalyzer:
    """Large-sample two-sample nonparametric test analyzer.

    All public methods are static, take plain Python lists of numbers and
    return JSON-serializable dicts.  Failures are reported through an
    ``'error'`` key in the returned dict rather than raised, so results can
    be forwarded to MCP clients unchanged.
    """

    @staticmethod
    def check_sample_size(sample1: List[float], sample2: List[float]) -> Dict[str, Any]:
        """Classify the combined sample size and recommend a test service.

        Args:
            sample1: First sample.
            sample2: Second sample.

        Returns:
            Dict with the individual and total sizes, a size category
            ('small'/'medium'/'large'/'very_large'), a human-readable
            recommendation and the MCP service best suited to that size.
        """
        n1, n2 = len(sample1), len(sample2)
        total_n = n1 + n2

        # Thresholds: n<=30 belongs to the exact small-sample service;
        # everything larger is handled with large-sample approximations.
        if total_n <= 30:
            recommendation = "样本量较小，请使用小样本精确检验服务"
            category = "small"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-exact-test"
        elif total_n < 100:
            recommendation = "样本量中等，适合使用大样本近似检验方法（t检验、Mann-Whitney U大样本版本）"
            category = "medium"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-approximation-test"
        elif total_n < 1000:
            recommendation = "样本量较大，可以使用参数检验方法（t检验、Z检验）"
            category = "large"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-approximation-test"
        else:
            recommendation = "样本量很大，参数检验方法效果最佳（Z检验、t检验）"
            category = "very_large"
            service_recommendation = "mcp.config.usrlocalmcp.two-sample-approximation-test"

        result = {
            'sample1_size': n1,
            'sample2_size': n2,
            'total_size': total_n,
            'size_category': category,
            'recommendation': recommendation,
            'recommended_service': service_recommendation,
            'is_large_sample': total_n > 30
        }
        return ensure_json_serializable(result)

    @staticmethod
    def normality_test(sample: List[float]) -> Dict[str, Any]:
        """Run normality tests on a single sample.

        Shapiro-Wilk is used for n <= 5000 and D'Agostino's K^2
        (``scipy.stats.normaltest``) for n >= 20.  When both run, the sample
        is judged normal only if both agree (p > 0.05 for each).

        Args:
            sample: Sample values.

        Returns:
            Dict with per-test statistics/p-values plus an
            'overall_assessment' entry; on failure a dict with 'error'.
        """
        try:
            sample_array = np.array(sample)
            n = len(sample_array)

            results = {}

            # Shapiro-Wilk: appropriate for small-to-moderate samples.
            if n <= 5000:
                shapiro_stat, shapiro_p = shapiro(sample_array)
                results['shapiro_wilk'] = {
                    'statistic': shapiro_stat,
                    'p_value': shapiro_p,
                    'is_normal': shapiro_p > 0.05
                }

            # D'Agostino's K^2: requires n >= 20 to be valid.
            if n >= 20:
                dagostino_stat, dagostino_p = normaltest(sample_array)
                results['dagostino'] = {
                    'statistic': dagostino_stat,
                    'p_value': dagostino_p,
                    'is_normal': dagostino_p > 0.05
                }

            # Combined verdict: require agreement when both tests ran;
            # None means no test could be performed.
            if 'shapiro_wilk' in results and 'dagostino' in results:
                is_normal = results['shapiro_wilk']['is_normal'] and results['dagostino']['is_normal']
            elif 'shapiro_wilk' in results:
                is_normal = results['shapiro_wilk']['is_normal']
            elif 'dagostino' in results:
                is_normal = results['dagostino']['is_normal']
            else:
                is_normal = None

            # BUG FIX: the original collapsed is_normal=None into the
            # "not normal" message via a falsy check; report it separately.
            if is_normal is None:
                recommendation = '样本量不足，无法进行正态性检验'
            elif is_normal:
                recommendation = '数据符合正态分布，可使用参数检验'
            else:
                recommendation = '数据不符合正态分布，建议使用非参数检验'

            results['overall_assessment'] = {
                'is_normal': is_normal,
                'sample_size': n,
                'recommendation': recommendation
            }

            return ensure_json_serializable(results)

        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Normality Test'
            })

    @staticmethod
    def mann_whitney_u_test_large(sample1: List[float], sample2: List[float], alternative: str = 'two-sided', confidence_level: float = 0.95) -> Dict[str, Any]:
        """Mann-Whitney U test using the large-sample normal approximation.

        Args:
            sample1: First sample.
            sample2: Second sample.
            alternative: 'two-sided', 'less' or 'greater'.
            confidence_level: Level for the Hodges-Lehmann CI (default 0.95).

        Returns:
            Dict with the U statistic, z score, p-value, effect size r,
            Hodges-Lehmann confidence interval and a textual interpretation;
            on failure a dict with 'error'.
        """
        try:
            statistic, p_value = mannwhitneyu(sample1, sample2, alternative=alternative, use_continuity=True)

            n1, n2 = len(sample1), len(sample2)
            total_n = n1 + n2

            # Normal approximation of U under H0: z = (U - mu_U) / sigma_U.
            mean_u = n1 * n2 / 2
            std_u = np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12)
            z_score = (statistic - mean_u) / std_u
            # Effect size r = |z| / sqrt(N).
            effect_size_r = abs(z_score) / np.sqrt(total_n)

            confidence_interval = TwoSampleNonparametricTestAnalyzer._calculate_hodges_lehmann_ci_large(
                sample1, sample2, confidence_level
            )

            # NOTE: significance is judged at a fixed alpha of 0.05,
            # independent of confidence_level (kept for compatibility).
            reject_null = p_value < 0.05

            # BUG FIX: the original formatted the CI bounds unconditionally,
            # raising TypeError (None with :.4f) when the CI helper failed.
            ci_note = ""
            if confidence_interval.get('lower') is not None and confidence_interval.get('upper') is not None:
                ci_note = (
                    f"。Hodges-Lehmann位置参数差的{confidence_level*100}%置信区间为"
                    f"[{confidence_interval['lower']:.4f}, {confidence_interval['upper']:.4f}]"
                )

            result = {
                'u_statistic': statistic,
                'z_score': z_score,
                'p_value': p_value,
                'reject_null': reject_null,
                'effect_size_r': effect_size_r,
                'confidence_interval': confidence_interval,
                'confidence_level': confidence_level,
                'alternative': alternative,
                'method': 'Mann-Whitney U Test (Large Sample)',
                'interpretation': f"U统计量为{statistic:.4f}，Z得分为{z_score:.4f}，p值为{p_value:.4f}，效应量r为{effect_size_r:.4f}，{'拒绝' if reject_null else '不拒绝'}原假设" + ci_note
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Mann-Whitney U Test (Large Sample)'
            })

    @staticmethod
    def _calculate_hodges_lehmann_ci_large(sample1: List[float], sample2: List[float], confidence_level: float) -> Dict[str, Any]:
        """Hodges-Lehmann location-shift estimate with a normal-approximation CI.

        Args:
            sample1: First sample.
            sample2: Second sample.
            confidence_level: Two-sided confidence level, e.g. 0.95.

        Returns:
            Dict with 'lower', 'upper' and 'estimate'; on failure the numeric
            fields are None and an 'error' message is included.
        """
        try:
            arr1 = np.asarray(sample1, dtype=float)
            arr2 = np.asarray(sample2, dtype=float)

            # All pairwise differences x - y, vectorized via broadcasting.
            # (The original built them in a nested Python loop — same values
            # and order, but O(n1*n2) interpreter overhead.)
            differences = (arr1[:, None] - arr2[None, :]).ravel()

            # Hodges-Lehmann estimator: median of the pairwise differences.
            hodges_lehmann_estimate = float(np.median(differences))

            n1, n2 = len(sample1), len(sample2)
            n_total = n1 * n2

            from scipy.stats import norm
            alpha = 1 - confidence_level
            z_critical = norm.ppf(1 - alpha / 2)

            # Variance of the Mann-Whitney statistic under H0.
            var_mw = n1 * n2 * (n1 + n2 + 1) / 12
            se_hl = np.sqrt(var_mw) / n_total

            # Normal-approximation margin of error around the HL estimate.
            margin_error = z_critical * se_hl * np.std(differences)

            return {
                'lower': float(hodges_lehmann_estimate - margin_error),
                'upper': float(hodges_lehmann_estimate + margin_error),
                'estimate': hodges_lehmann_estimate
            }
        except Exception as e:
            return {
                'lower': None,
                'upper': None,
                'estimate': None,
                'error': str(e)
            }

    @staticmethod
    def wilcoxon_rank_sum_test(sample1: List[float], sample2: List[float], confidence_level: float = 0.95) -> Dict[str, Any]:
        """Wilcoxon rank-sum test (normal approximation, suited to large samples).

        Args:
            sample1: First sample.
            sample2: Second sample.
            confidence_level: Level for the Hodges-Lehmann CI (default 0.95).

        Returns:
            Dict with the z statistic, p-value, effect size r,
            Hodges-Lehmann confidence interval and a textual interpretation;
            on failure a dict with 'error'.
        """
        try:
            statistic, p_value = ranksums(sample1, sample2)

            # Effect size r = |z| / sqrt(N) — ranksums already returns a
            # z-distributed statistic.
            n1, n2 = len(sample1), len(sample2)
            total_n = n1 + n2
            effect_size_r = abs(statistic) / np.sqrt(total_n)

            confidence_interval = TwoSampleNonparametricTestAnalyzer._calculate_hodges_lehmann_ci_large(
                sample1, sample2, confidence_level
            )

            # Fixed alpha of 0.05, independent of confidence_level.
            reject_null = p_value < 0.05

            # BUG FIX: guard against a failed CI computation (None bounds)
            # before formatting with :.4f.
            ci_note = ""
            if confidence_interval.get('lower') is not None and confidence_interval.get('upper') is not None:
                ci_note = (
                    f"。Hodges-Lehmann位置参数差的{confidence_level*100}%置信区间为"
                    f"[{confidence_interval['lower']:.4f}, {confidence_interval['upper']:.4f}]"
                )

            result = {
                'z_statistic': statistic,
                'p_value': p_value,
                'reject_null': reject_null,
                'effect_size_r': effect_size_r,
                'confidence_interval': confidence_interval,
                'confidence_level': confidence_level,
                'method': 'Wilcoxon Rank Sum Test',
                'interpretation': f"Z统计量为{statistic:.4f}，p值为{p_value:.4f}，效应量r为{effect_size_r:.4f}，{'拒绝' if reject_null else '不拒绝'}原假设" + ci_note
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Wilcoxon Rank Sum Test'
            })

    @staticmethod
    def kolmogorov_smirnov_test(sample1: List[float], sample2: List[float]) -> Dict[str, Any]:
        """Two-sample Kolmogorov-Smirnov test.

        Tests whether the two samples come from the same distribution
        (sensitive to differences in location, scale and shape).

        Args:
            sample1: First sample.
            sample2: Second sample.

        Returns:
            Dict with the KS statistic, p-value, decision at alpha=0.05 and
            a textual interpretation; on failure a dict with 'error'.
        """
        try:
            statistic, p_value = ks_2samp(sample1, sample2)

            reject_null = p_value < 0.05

            result = {
                'ks_statistic': statistic,
                'p_value': p_value,
                'reject_null': reject_null,
                'method': 'Kolmogorov-Smirnov Test',
                'interpretation': f"KS统计量为{statistic:.4f}，p值为{p_value:.4f}，{'拒绝' if reject_null else '不拒绝'}原假设"
            }
            return ensure_json_serializable(result)
        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'method': 'Kolmogorov-Smirnov Test'
            })

    @staticmethod
    def auto_select_method(sample1: List[float], sample2: List[float], alpha: float = 0.05) -> Dict[str, Any]:
        """Automatically pick a suitable large-sample test for the data.

        Args:
            sample1: First sample.
            sample2: Second sample.
            alpha: Significance level; currently informational only — the
                selection logic does not use it (kept for API compatibility).

        Returns:
            Dict with 'selected_method', the reason, alternative methods and
            normality info; or a redirect to the small-sample service, or a
            dict with 'error' on failure.
        """
        try:
            analyzer = TwoSampleNonparametricTestAnalyzer()

            size_info = analyzer.check_sample_size(sample1, sample2)
            total_n = size_info['total_size']

            # This service only handles n > 30; redirect smaller samples.
            if total_n <= 30:
                return ensure_json_serializable({
                    'selected_method': 'use-small-sample-service',
                    'reason': f'样本量较小(n={total_n})，请使用小样本精确检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-exact-test'
                })

            # Normality is reported for context; the recommendation below is
            # always the robust nonparametric choice regardless of outcome.
            norm_test1 = analyzer.normality_test(sample1)
            norm_test2 = analyzer.normality_test(sample2)

            is_normal1 = norm_test1.get('overall_assessment', {}).get('is_normal', False)
            is_normal2 = norm_test2.get('overall_assessment', {}).get('is_normal', False)
            both_normal = is_normal1 and is_normal2

            return ensure_json_serializable({
                'selected_method': 'mann-whitney-u-test-large',
                'reason': f'大样本(n={total_n})，推荐Mann-Whitney U检验（大样本近似版本）作为稳健的非参数检验方法',
                'alternative_methods': [
                    'wilcoxon-rank-sum-test',
                    'kolmogorov-smirnov-test'
                ],
                'normality_info': {
                    'sample1_normal': is_normal1,
                    'sample2_normal': is_normal2,
                    'both_normal': both_normal
                }
            })

        except Exception as e:
            return ensure_json_serializable({
                'error': str(e),
                'selected_method': 'unknown'
            })

@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """Advertise the tools this MCP server exposes.

    Schemas are kept in sync with handle_call_tool: confidence_level is
    declared only on the tools whose handlers actually pass it through
    (the original listed it on check_sample_size, which ignores it, and
    omitted it from wilcoxon_rank_sum_test, which accepts it).
    """
    # Shared schema fragments for the two-sample tools; copied per-tool so
    # no Tool shares a mutable dict with another.
    sample1_schema = {
        "type": "array",
        "items": {"type": "number"},
        "description": "第一个样本数组"
    }
    sample2_schema = {
        "type": "array",
        "items": {"type": "number"},
        "description": "第二个样本数组"
    }
    return [
        Tool(
            name="auto_select_method",
            description="根据数据特征自动选择合适的大样本统计检验方法",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample1": dict(sample1_schema),
                    "sample2": dict(sample2_schema),
                    "alpha": {
                        "type": "number",
                        "description": "显著性水平（默认0.05）",
                        "default": 0.05
                    }
                },
                "required": ["sample1", "sample2"]
            }
        ),
        Tool(
            name="check_sample_size",
            description="检查样本大小并给出大样本检验方法建议",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample1": dict(sample1_schema),
                    "sample2": dict(sample2_schema)
                },
                "required": ["sample1", "sample2"]
            }
        ),
        Tool(
            name="normality_test",
            description="对样本进行正态性检验，包括Shapiro-Wilk和D'Agostino检验",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据数组"
                    }
                },
                "required": ["sample"]
            }
        ),
        Tool(
            name="mann_whitney_u_test_large",
            description="Mann-Whitney U检验（大样本版本），使用正态近似",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample1": dict(sample1_schema),
                    "sample2": dict(sample2_schema),
                    "alternative": {
                        "type": "string",
                        "enum": ["two-sided", "less", "greater"],
                        "description": "备择假设类型（'two-sided', 'less', 'greater'）",
                        "default": "two-sided"
                    },
                    "confidence_level": {
                        "type": "number",
                        "description": "置信水平（默认0.95）",
                        "default": 0.95
                    }
                },
                "required": ["sample1", "sample2"]
            }
        ),
        Tool(
            name="wilcoxon_rank_sum_test",
            description="Wilcoxon秩和检验（大样本近似版本），使用正态近似，适合n>30的样本",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample1": dict(sample1_schema),
                    "sample2": dict(sample2_schema),
                    "confidence_level": {
                        "type": "number",
                        "description": "置信水平（默认0.95）",
                        "default": 0.95
                    }
                },
                "required": ["sample1", "sample2"]
            }
        ),
        Tool(
            name="kolmogorov_smirnov_test",
            description="Kolmogorov-Smirnov双样本检验（大样本近似版本），检验两个样本是否来自同一分布",
            inputSchema={
                "type": "object",
                "properties": {
                    "sample1": dict(sample1_schema),
                    "sample2": dict(sample2_schema)
                },
                "required": ["sample1", "sample2"]
            }
        )
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
    """Dispatch an MCP tool call to the analyzer and return JSON text content.

    Any failure (including an unknown tool name) is converted into a JSON
    error payload instead of propagating to the transport.
    """
    try:
        logger.info(f"开始处理工具调用: {name}")
        logger.info(f"参数: {arguments}")

        analyzer = TwoSampleNonparametricTestAnalyzer()

        def small_sample_guard(method_label: str) -> Optional[Dict[str, Any]]:
            # Shared guard: this service only handles total n > 30.
            # Returns an error payload for small samples, else None.
            total_n = len(arguments["sample1"]) + len(arguments["sample2"])
            if total_n <= 30:
                return {
                    'error': f'样本量过小(n={total_n})，请使用小样本精确检验服务',
                    'recommended_service': 'mcp.config.usrlocalmcp.two-sample-exact-test',
                    'method': method_label
                }
            return None

        if name == "auto_select_method":
            result = analyzer.auto_select_method(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"],
                alpha=arguments.get("alpha", 0.05)
            )
        elif name == "check_sample_size":
            result = analyzer.check_sample_size(
                sample1=arguments["sample1"],
                sample2=arguments["sample2"]
            )
        elif name == "normality_test":
            result = analyzer.normality_test(
                sample=arguments["sample"]
            )
        elif name == "mann_whitney_u_test_large":
            result = small_sample_guard('Mann-Whitney U Test (Large Sample)')
            if result is None:
                result = analyzer.mann_whitney_u_test_large(
                    sample1=arguments["sample1"],
                    sample2=arguments["sample2"],
                    alternative=arguments.get("alternative", "two-sided"),
                    confidence_level=arguments.get("confidence_level", 0.95)
                )
        elif name == "wilcoxon_rank_sum_test":
            result = small_sample_guard('Wilcoxon Rank Sum Test (Large Sample)')
            if result is None:
                result = analyzer.wilcoxon_rank_sum_test(
                    sample1=arguments["sample1"],
                    sample2=arguments["sample2"],
                    confidence_level=arguments.get("confidence_level", 0.95)
                )
        elif name == "kolmogorov_smirnov_test":
            result = small_sample_guard('Kolmogorov-Smirnov Test (Large Sample)')
            if result is None:
                result = analyzer.kolmogorov_smirnov_test(
                    sample1=arguments["sample1"],
                    sample2=arguments["sample2"]
                )
        else:
            raise ValueError(f"未知的工具名称: {name}")

        logger.info("开始类型转换")

        # Recursively strip numpy types (and coerce dict keys to str, since
        # JSON object keys must be strings) so json.dumps cannot fail on them.
        def deep_clean_types(obj):
            if obj is None:
                return None
            elif isinstance(obj, dict):
                return {
                    (k if isinstance(k, str) else str(k)): deep_clean_types(v)
                    for k, v in obj.items()
                }
            elif isinstance(obj, (list, tuple)):
                return [deep_clean_types(item) for item in obj]
            elif isinstance(obj, str):
                return obj
            elif isinstance(obj, (bool, np.bool_)):
                return bool(obj)
            elif isinstance(obj, (int, np.integer)):
                return int(obj)
            elif isinstance(obj, (float, np.floating)):
                return float(obj)
            elif isinstance(obj, np.ndarray):
                return obj.tolist()
            elif hasattr(obj, 'item'):
                # Remaining numpy scalars: unwrap, then clean the result.
                return deep_clean_types(obj.item())
            else:
                return obj

        result = deep_clean_types(result)
        logger.info("深度类型清理完成")

        logger.info("开始JSON序列化")
        json_str = json.dumps(result, ensure_ascii=False, indent=2, cls=NumpyEncoder)
        logger.info(f"JSON序列化成功，长度: {len(json_str)}")

        logger.info("返回结果")
        return [TextContent(type="text", text=json_str)]

    except Exception as e:
        import traceback
        full_traceback = traceback.format_exc()
        logger.error(f"工具调用失败: {str(e)}")
        logger.error(f"错误类型: {type(e)}")
        logger.error(f"完整错误堆栈: {full_traceback}")

        # All values here are plain strings, so no numpy cleanup is needed.
        error_result = {
            "error": str(e),
            "tool": name,
            "traceback": full_traceback
        }

        try:
            json_str = json.dumps(error_result, cls=NumpyEncoder, ensure_ascii=False, indent=2)
            return [TextContent(type="text", text=json_str)]
        except Exception as json_error:
            logger.error(f"JSON序列化错误结果失败: {str(json_error)}")
            # BUG FIX: the original built this fallback with a malformed
            # f-string (f'{"error": ...}') whose braces were parsed as a
            # format spec and raised at runtime; build real JSON instead.
            fallback = json.dumps({
                "error": f"序列化失败: {json_error}",
                "original_error": str(e),
                "tool": name
            }, ensure_ascii=False)
            return [TextContent(type="text", text=fallback)]

if __name__ == "__main__":
    # asyncio is already imported at module top; the original redundantly
    # re-imported it here.
    from mcp.server.stdio import stdio_server

    async def main():
        """Run the MCP server over stdio until the streams close."""
        async with stdio_server() as (read_stream, write_stream):
            await server.run(
                read_stream,
                write_stream,
                InitializationOptions(
                    # NOTE(review): this name is "two-sample-..." while the
                    # Server object above was created as "two-samples-..." —
                    # presumably one of the two is a typo; confirm which
                    # identifier clients rely on before unifying.
                    server_name="two-sample-nonparametric-test",
                    server_version="1.0.0",
                    capabilities=server.get_capabilities(
                        notification_options=NotificationOptions(),
                        experimental_capabilities={}
                    )
                )
            )

    asyncio.run(main())