#!/usr/bin/env python3
"""
非参数统计分析 MCP 服务器
提供单样本、双样本、多样本数据的统计分析功能
"""

import asyncio
import json
import sys
from typing import Any, Dict, List, Optional, Union, Tuple
from functools import lru_cache
import numpy as np
from scipy import stats
from scipy.stats import jarque_bera, shapiro, anderson, kstest
import logging

# MCP imports
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
    Resource,
    Tool,
    TextContent,
    ImageContent,
    EmbeddedResource,
    LoggingLevel
)

# Configuration constants
SIGNIFICANCE_LEVEL = 0.05  # significance level (alpha) shared by all tests
MIN_SAMPLE_SIZE = 2        # minimum accepted sample size
MAX_DATA_SIZE = 10000      # maximum accepted sample size
MAX_GROUPS = 50            # maximum number of groups for multi-sample tests

# Logging configuration (module-level logger used throughout this file)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Custom exception hierarchy for this server
class StatisticsError(Exception):
    """Base class for all statistics-related errors raised by this module."""

class InsufficientDataError(StatisticsError):
    """Raised when a sample is empty or too small for the requested test."""

class InvalidDataError(StatisticsError):
    """Raised when data is malformed: too large, non-numeric, NaN or infinite."""

server = Server("nonparametric-statistics")

# Input-validation helpers
def validate_data(data: List[float], min_size: int = MIN_SAMPLE_SIZE, max_size: int = MAX_DATA_SIZE) -> None:
    """Validate a single numeric sample.

    Args:
        data: Sequence of numbers to validate.
        min_size: Minimum accepted sample size.
        max_size: Maximum accepted sample size.

    Raises:
        InsufficientDataError: If the data is empty or shorter than min_size.
        InvalidDataError: If the data is too long, contains non-numeric
            elements, or contains NaN / infinite values.
    """
    if not data:
        raise InsufficientDataError("数据不能为空")

    if len(data) < min_size:
        raise InsufficientDataError(f"数据长度({len(data)})小于最小要求({min_size})")

    if len(data) > max_size:
        raise InvalidDataError(f"数据长度({len(data)})超过最大限制({max_size})")

    # Coerce to a float array so non-numeric entries (strings, None, ...)
    # surface as InvalidDataError instead of an uncaught TypeError from
    # np.isnan on an object-dtype array.
    try:
        data_array = np.asarray(data, dtype=float)
    except (TypeError, ValueError) as e:
        raise InvalidDataError(f"数据包含非数值元素: {e}") from e

    if np.isnan(data_array).any():
        raise InvalidDataError("数据包含NaN值")

    if np.isinf(data_array).any():
        raise InvalidDataError("数据包含无穷大值")

def validate_multi_sample_data(data_groups: List[List[float]], min_groups: int = 2, max_groups: int = MAX_GROUPS) -> None:
    """Validate a collection of samples for multi-group tests.

    Args:
        data_groups: List of numeric samples, one per group.
        min_groups: Minimum accepted number of groups.
        max_groups: Maximum accepted number of groups.

    Raises:
        InsufficientDataError: If there are no groups or fewer than min_groups.
        InvalidDataError: If there are more than max_groups groups, or any
            group fails single-sample validation.
    """
    if not data_groups:
        raise InsufficientDataError("数据组不能为空")

    group_count = len(data_groups)
    if group_count < min_groups:
        raise InsufficientDataError(f"数据组数({group_count})小于最小要求({min_groups})")

    if group_count > max_groups:
        raise InvalidDataError(f"数据组数({group_count})超过最大限制({max_groups})")

    # Every group must individually pass the single-sample checks; re-raise
    # failures as InvalidDataError tagged with the 1-based group number.
    for index, group in enumerate(data_groups, start=1):
        try:
            validate_data(group)
        except (InsufficientDataError, InvalidDataError) as e:
            raise InvalidDataError(f"组 {index} 数据无效: {str(e)}") from e

class StatisticsAnalyzer:
    """Namespace of stateless nonparametric statistics routines.

    Every method is a @staticmethod; the class exists purely to group the
    descriptive statistics and hypothesis tests used by the MCP tool handlers.
    Dict values that are user-facing are intentionally kept in Chinese to
    match the tool's output contract.
    """

    @staticmethod
    def _calculate_sample_size_assessment(total_sample_size: int) -> Dict[str, Any]:
        """Classify the pooled sample size and recommend a follow-up service.

        Args:
            total_sample_size: Combined number of observations across groups.

        Returns:
            Dict with the size category, recommended downstream service name
            and human-readable suggestions.
        """
        # n <= 30 is the conventional cutoff below which normal
        # approximations for rank-based tests are considered unreliable.
        if total_sample_size <= 30:
            return {
                "total_sample_size": total_sample_size,
                "size_category": "小样本",
                "recommended_service": "exact_test",
                "statistical_power": "中等",
                "suggestions": [
                    "样本量较小，推荐使用精确检验方法",
                    "精确检验能提供准确的p值，不依赖大样本假设"
                ]
            }
        return {
            "total_sample_size": total_sample_size,
            "size_category": "大样本",
            "recommended_service": "approximation_test",
            "statistical_power": "高",
            "suggestions": [
                "样本量充足，推荐使用近似检验方法",
                "近似检验计算更快，基于正态分布近似"
            ]
        }

    @staticmethod
    def five_number_summary(data: List[float]) -> Dict[str, float]:
        """Compute the five-number summary (min, Q1, median, Q3, max).

        Args:
            data: Numeric sample.

        Returns:
            Dict with keys "minimum", "q1", "median", "q3", "maximum".

        Raises:
            InvalidDataError: If the data is invalid.
            InsufficientDataError: If the sample is too small.
        """
        validate_data(data)
        data_array = np.array(data)

        logger.debug(f"计算五数概括，数据大小: {len(data)}")

        return {
            "minimum": float(np.min(data_array)),
            "q1": float(np.percentile(data_array, 25)),
            "median": float(np.median(data_array)),
            "q3": float(np.percentile(data_array, 75)),
            "maximum": float(np.max(data_array))
        }

    @staticmethod
    def basic_statistics(data: List[float]) -> Dict[str, float]:
        """Compute basic descriptive statistics.

        Args:
            data: Numeric sample of at least 3 points (so skewness and
                kurtosis are defined).

        Returns:
            Dict with mean, range, sample variance, sample std deviation,
            coefficient of variation, skewness and kurtosis.

        Raises:
            InvalidDataError: If the data is invalid.
            InsufficientDataError: If fewer than 3 points are supplied.
        """
        validate_data(data, min_size=3)  # skewness/kurtosis need >= 3 points
        data_array = np.array(data)

        logger.debug(f"计算基本统计量，数据大小: {len(data)}")

        mean = np.mean(data_array)
        std = np.std(data_array, ddof=1)  # sample standard deviation
        var = np.var(data_array, ddof=1)  # sample variance

        # The coefficient of variation is undefined for a ~zero mean;
        # report infinity instead of dividing by (almost) zero.
        if abs(mean) < 1e-10:
            cv = float('inf')
            logger.warning("均值接近0，变异系数设为无穷大")
        else:
            cv = float(std / abs(mean))

        try:
            skewness = float(stats.skew(data_array))
            kurtosis = float(stats.kurtosis(data_array))
        except Exception as e:
            # Degenerate samples (e.g. zero variance) can make scipy fail;
            # report NaN rather than aborting the whole analysis.
            logger.warning(f"计算偏度或峰度时出错: {e}")
            skewness = float('nan')
            kurtosis = float('nan')

        return {
            "mean": float(mean),
            "range": float(np.max(data_array) - np.min(data_array)),
            "variance": float(var),
            "std_deviation": float(std),
            "coefficient_of_variation": cv,
            "skewness": skewness,
            "kurtosis": kurtosis
        }

    @staticmethod
    @lru_cache(maxsize=128)
    def _runs_test(data_tuple: Tuple[float, ...]) -> Dict[str, Any]:
        """Wald-Wolfowitz runs test around the median (memoized).

        Args:
            data_tuple: Sample as a tuple — must be a tuple (hashable) so
                results can be cached by lru_cache.

        Returns:
            Dict with the observed and expected run counts, z statistic and
            two-sided p-value; carries an "error" key when the test is
            undefined (all values on one side of the median).
        """
        data = np.array(data_tuple)
        median = np.median(data)

        # Dichotomize around the median (ties are counted as "below").
        binary_seq = [1 if x > median else 0 for x in data]
        n1 = sum(binary_seq)
        n2 = len(binary_seq) - n1
        # A run starts at the first element and after every sign change.
        runs = 1 + sum(1 for prev, cur in zip(binary_seq, binary_seq[1:]) if prev != cur)

        if n1 == 0 or n2 == 0:
            # Every value equals (or sits on one side of) the median;
            # the runs statistic is undefined.
            return {
                "runs": runs,
                "expected_runs": float('nan'),
                "z_statistic": float('nan'),
                "p_value": float('nan'),
                "error": "所有值都相同，无法进行游程检验"
            }

        # Expected run count and variance under the null of randomness.
        expected_runs = (2 * n1 * n2) / (n1 + n2) + 1
        variance_runs = (2 * n1 * n2 * (2 * n1 * n2 - n1 - n2)) / ((n1 + n2) ** 2 * (n1 + n2 - 1))

        # Normal approximation: z statistic and two-sided p-value.
        # Coerce to plain floats so downstream JSON payloads are uniform.
        if variance_runs > 0:
            z_stat = float((runs - expected_runs) / np.sqrt(variance_runs))
            p_value = float(2 * (1 - stats.norm.cdf(abs(z_stat))))
        else:
            z_stat = 0.0
            p_value = 1.0

        return {
            "runs": runs,
            "expected_runs": expected_runs,
            "z_statistic": z_stat,
            "p_value": p_value
        }

    @staticmethod
    def white_noise_test(data: List[float]) -> Dict[str, Any]:
        """Test a series for randomness (white noise).

        Runs the Ljung-Box autocorrelation test (when statsmodels is
        installed) and the runs test around the median.

        Args:
            data: Numeric series of at least 8 observations.

        Returns:
            Dict with "ljung_box_test" and "runs_test" sub-results; either
            may carry an "error" key instead of statistics.

        Raises:
            InvalidDataError: If the data is invalid.
            InsufficientDataError: If fewer than 8 points are supplied.
        """
        # Autocorrelation-based tests need a longer series.
        validate_data(data, min_size=8)
        data_array = np.array(data)

        logger.debug(f"执行白噪声检验，数据大小: {len(data)}")

        result = {}

        # Ljung-Box test (optional dependency: statsmodels).
        try:
            from statsmodels.stats.diagnostic import acorr_ljungbox
            # Use up to 10 lags, but never more than a quarter of the series.
            lags = min(10, max(1, len(data_array) // 4))
            ljung_box = acorr_ljungbox(data_array, lags=lags, return_df=True)
            ljung_box_pvalue = float(ljung_box['lb_pvalue'].iloc[-1])

            result["ljung_box_test"] = {
                "p_value": ljung_box_pvalue,
                "is_white_noise": bool(ljung_box_pvalue > SIGNIFICANCE_LEVEL),
                "lags": lags
            }
        except ImportError:
            logger.warning("statsmodels未安装，无法执行Ljung-Box检验")
            result["ljung_box_test"] = {
                "error": "statsmodels未安装，无法执行Ljung-Box检验"
            }
        except Exception as e:
            logger.warning(f"Ljung-Box检验失败: {e}")
            result["ljung_box_test"] = {
                "error": f"检验失败: {str(e)}"
            }

        # Runs test (the cached helper requires a hashable tuple argument).
        try:
            data_tuple = tuple(float(x) for x in data_array)
            runs_result = StatisticsAnalyzer._runs_test(data_tuple)

            if "error" in runs_result:
                result["runs_test"] = runs_result
            else:
                result["runs_test"] = {
                    "runs": runs_result["runs"],
                    "expected_runs": runs_result["expected_runs"],
                    "z_statistic": runs_result["z_statistic"],
                    "p_value": runs_result["p_value"],
                    "is_random": bool(runs_result["p_value"] > SIGNIFICANCE_LEVEL)
                }
        except Exception as e:
            logger.warning(f"游程检验失败: {e}")
            result["runs_test"] = {
                "error": f"检验失败: {str(e)}"
            }

        return result

    @staticmethod
    def two_sample_test(data1: List[float], data2: List[float]) -> Dict[str, Any]:
        """Overview comparison of two samples with service recommendations.

        Provides descriptive statistics, a sample-size assessment, and — for
        small pooled samples only — a quick Mann-Whitney U result.

        Args:
            data1: First sample (at least 3 points).
            data2: Second sample (at least 3 points).

        Returns:
            Dict with sample info, descriptive overview, recommended
            follow-up service and (conditionally) a quick test result.

        Raises:
            InvalidDataError: If either sample is invalid.
            InsufficientDataError: If either sample has fewer than 3 points.
        """
        validate_data(data1, min_size=3)
        validate_data(data2, min_size=3)

        data1_array = np.array(data1)
        data2_array = np.array(data2)

        # Pooled sample size drives the service recommendation.
        total_sample_size = len(data1) + len(data2)

        logger.debug(f"执行双样本检验，样本大小: {len(data1)}, {len(data2)}, 总样本量: {total_sample_size}")

        sample_size_assessment = StatisticsAnalyzer._calculate_sample_size_assessment(total_sample_size)

        result = {
            "sample_info": {
                "sample1_size": len(data1),
                "sample2_size": len(data2),
                "total_sample_size": total_sample_size,
                "assessment": sample_size_assessment
            }
        }

        # Quick descriptive overview of both samples.
        result["descriptive_overview"] = {
            "sample1_stats": {
                "mean": float(np.mean(data1_array)),
                "median": float(np.median(data1_array)),
                "std": float(np.std(data1_array, ddof=1)) if len(data1) > 1 else 0.0
            },
            "sample2_stats": {
                "mean": float(np.mean(data2_array)),
                "median": float(np.median(data2_array)),
                "std": float(np.std(data2_array, ddof=1)) if len(data2) > 1 else 0.0
            }
        }

        # Recommend a follow-up service based on the pooled sample size.
        recommended_service = sample_size_assessment["recommended_service"]
        result["service_recommendations"] = {
            "primary_service": recommended_service,
            "reasoning": f"基于总样本量{total_sample_size}的评估",
            "available_methods": {
                "mann_whitney_u": "Mann-Whitney U检验",
                "wilcoxon_ranksum": "Wilcoxon秩和检验",
                "kolmogorov_smirnov": "Kolmogorov-Smirnov检验",
                "fisher_exact": "Fisher精确检验（分类数据）",
                "permutation_test": "置换检验（极小样本）"
            },
            "next_steps": [
                f"使用 {recommended_service} 服务进行详细分析",
                "根据数据特征选择合适的检验方法",
                "考虑数据的分布特征和研究假设"
            ]
        }

        # Only run a quick in-place test for small pooled samples; larger
        # samples are deferred to the dedicated approximation service.
        if total_sample_size <= 30:
            try:
                mannwhitney_stat, mannwhitney_p = stats.mannwhitneyu(
                    data1_array, data2_array, alternative='two-sided'
                )
                result["quick_test_result"] = {
                    "method": "Mann-Whitney U检验（快速结果）",
                    "p_value": float(mannwhitney_p),
                    "significant": bool(mannwhitney_p < SIGNIFICANCE_LEVEL),
                    "note": "建议使用专门的检验服务获取完整结果"
                }
            except Exception as e:
                result["quick_test_result"] = {
                    "error": f"快速检验失败: {str(e)}",
                    "note": "请使用专门的检验服务"
                }
        else:
            result["quick_test_result"] = {
                "note": f"样本量较大({total_sample_size})，建议直接使用{recommended_service}服务"
            }

        return result

    @staticmethod
    def multi_sample_test(data_groups: List[List[float]]) -> Dict[str, Any]:
        """Compare several samples with Kruskal-Wallis and Friedman tests.

        Args:
            data_groups: List of numeric samples (at least 2 groups).

        Returns:
            Dict with "kruskal_wallis" and "friedman" sub-results; either
            may carry an "error" key instead of statistics.

        Raises:
            InvalidDataError: If any group is invalid.
            InsufficientDataError: If there are fewer than 2 groups.
        """
        validate_multi_sample_data(data_groups)

        logger.debug(f"执行多样本检验，组数: {len(data_groups)}, 样本大小: {[len(g) for g in data_groups]}")

        result = {}

        # Kruskal-Wallis: independent groups, unequal sizes allowed.
        try:
            kruskal_stat, kruskal_p = stats.kruskal(*data_groups)
            result["kruskal_wallis"] = {
                "statistic": float(kruskal_stat),
                "p_value": float(kruskal_p),
                "significant": bool(kruskal_p < SIGNIFICANCE_LEVEL),
                "method": "Kruskal-Wallis检验",
                "groups_count": len(data_groups)
            }
        except Exception as e:
            logger.warning(f"Kruskal-Wallis检验失败: {e}")
            result["kruskal_wallis"] = {
                "error": f"检验失败: {str(e)}"
            }

        # Friedman: repeated measures, so all groups must have equal size.
        try:
            lengths = [len(group) for group in data_groups]
            if len(set(lengths)) == 1:  # all groups equally sized
                friedman_stat, friedman_p = stats.friedmanchisquare(*data_groups)
                result["friedman"] = {
                    "statistic": float(friedman_stat),
                    "p_value": float(friedman_p),
                    "significant": bool(friedman_p < SIGNIFICANCE_LEVEL),
                    "method": "Friedman检验",
                    "groups_count": len(data_groups),
                    "samples_per_group": lengths[0]
                }
            else:
                result["friedman"] = {
                    "error": "Friedman检验要求所有组具有相同的样本数量",
                    "group_sizes": lengths
                }
        except Exception as e:
            logger.warning(f"Friedman检验失败: {e}")
            result["friedman"] = {
                "error": f"检验失败: {str(e)}"
            }

        return result

@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """Advertise the statistics tools this MCP server exposes."""

    def _number_array(description: str) -> Dict[str, Any]:
        # JSON-schema fragment: a flat array of numbers.
        return {
            "type": "array",
            "items": {"type": "number"},
            "description": description
        }

    def _object_schema(properties: Dict[str, Any]) -> Dict[str, Any]:
        # JSON-schema object in which every declared property is required.
        return {
            "type": "object",
            "properties": properties,
            "required": list(properties)
        }

    # Array-of-number-arrays schema used by the multi-sample tool.
    nested_number_arrays = {
        "type": "array",
        "items": {
            "type": "array",
            "items": {"type": "number"}
        },
        "description": "多组数据的数组"
    }

    return [
        Tool(
            name="single_sample_analysis",
            description="对单样本数据进行完整的统计分析，包括五数概括、基本统计量和随机性检验",
            inputSchema=_object_schema({"data": _number_array("数值数据数组")})
        ),
        Tool(
            name="two_sample_analysis",
            description="对两个样本进行概览分析，包括样本量评估、描述性统计和服务推荐。根据样本量自动推荐使用exact_test或approximation_test服务进行详细分析",
            inputSchema=_object_schema({
                "data1": _number_array("第一组数据"),
                "data2": _number_array("第二组数据")
            })
        ),
        Tool(
            name="multi_sample_analysis",
            description="对多个样本进行比较分析，包括Kruskal-Wallis检验和Friedman检验",
            inputSchema=_object_schema({"data_groups": nested_number_arrays})
        ),
        Tool(
            name="descriptive_statistics",
            description="计算描述性统计量：五数概括、均值、极差、方差、变异系数、偏度系数、峰度系数",
            inputSchema=_object_schema({"data": _number_array("数值数据数组")})
        ),
        Tool(
            name="randomness_test",
            description="检验数据的随机性（白噪声检验），包括Ljung-Box检验和游程检验",
            inputSchema=_object_schema({"data": _number_array("数值数据数组")})
        )
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
    """Dispatch an MCP tool call to the matching analyzer routine.

    Args:
        name: Tool name as advertised by handle_list_tools.
        arguments: Raw argument mapping supplied by the client.

    Returns:
        A single-element list holding a TextContent JSON payload, either a
        success result or a standardized error object.
    """
    analyzer = StatisticsAnalyzer()
    
    def validate_arguments(required_params: List[str]) -> Dict[str, Any]:
        """Check that all required parameters are present; return just those."""
        missing_params = [param for param in required_params if param not in arguments]
        if missing_params:
            raise InvalidDataError(f"缺少必需参数: {', '.join(missing_params)}")
        return {param: arguments[param] for param in required_params}
    
    def create_error_response(error: Exception) -> List[TextContent]:
        """Build the standardized JSON error response."""
        error_result = {
            "status": "error",
            "error_type": type(error).__name__,
            "message": str(error),
            "tool_name": name
        }
        return [TextContent(
            type="text",
            text=json.dumps(error_result, indent=2, ensure_ascii=False)
        )]
    
    def create_success_response(result: Dict[str, Any], description: str) -> List[TextContent]:
        """Build the standardized JSON success response (mutates result in place)."""
        result["status"] = "success"
        result["tool_name"] = name
        return [TextContent(
            type="text",
            text=f"{description}：\n{json.dumps(result, indent=2, ensure_ascii=False)}"
        )]
    
    try:
        if name == "single_sample_analysis":
            params = validate_arguments(["data"])
            data = params["data"]
            
            if not isinstance(data, list):
                raise InvalidDataError("参数'data'必须是数值列表")
            
            # Full single-sample pipeline: summary, descriptives, randomness.
            five_num = analyzer.five_number_summary(data)
            basic_stats = analyzer.basic_statistics(data)
            randomness = analyzer.white_noise_test(data)
            
            result = {
                "sample_size": len(data),
                "five_number_summary": five_num,
                "descriptive_statistics": basic_stats,
                "randomness_tests": randomness,
                "analysis_type": "单样本统计分析"
            }
            
            logger.info(f"单样本分析完成，样本大小: {len(data)}")
            return create_success_response(result, "单样本统计分析结果")
        
        elif name == "two_sample_analysis":
            params = validate_arguments(["data1", "data2"])
            data1, data2 = params["data1"], params["data2"]
            
            if not isinstance(data1, list) or not isinstance(data2, list):
                raise InvalidDataError("参数'data1'和'data2'必须是数值列表")
            
            result = analyzer.two_sample_test(data1, data2)
            result.update({
                "sample_sizes": {"group1": len(data1), "group2": len(data2)},
                "analysis_type": "双样本比较分析"
            })
            
            logger.info(f"双样本分析完成，样本大小: {len(data1)}, {len(data2)}")
            return create_success_response(result, "双样本比较分析结果")
        
        elif name == "multi_sample_analysis":
            params = validate_arguments(["data_groups"])
            data_groups = params["data_groups"]
            
            if not isinstance(data_groups, list) or not all(isinstance(group, list) for group in data_groups):
                raise InvalidDataError("参数'data_groups'必须是数值列表的列表")
            
            result = analyzer.multi_sample_test(data_groups)
            result.update({
                "sample_sizes": [len(group) for group in data_groups],
                "total_groups": len(data_groups),
                "analysis_type": "多样本比较分析"
            })
            
            logger.info(f"多样本分析完成，组数: {len(data_groups)}")
            return create_success_response(result, "多样本比较分析结果")
        
        elif name == "descriptive_statistics":
            params = validate_arguments(["data"])
            data = params["data"]
            
            if not isinstance(data, list):
                raise InvalidDataError("参数'data'必须是数值列表")
            
            five_num = analyzer.five_number_summary(data)
            basic_stats = analyzer.basic_statistics(data)
            
            result = {
                "sample_size": len(data),
                "five_number_summary": five_num,
                "descriptive_statistics": basic_stats,
                "analysis_type": "描述性统计"
            }
            
            logger.info(f"描述性统计完成，样本大小: {len(data)}")
            return create_success_response(result, "描述性统计结果")
        
        elif name == "randomness_test":
            params = validate_arguments(["data"])
            data = params["data"]
            
            if not isinstance(data, list):
                raise InvalidDataError("参数'data'必须是数值列表")
            
            result = analyzer.white_noise_test(data)
            result.update({
                "sample_size": len(data),
                "analysis_type": "随机性检验"
            })
            
            logger.info(f"随机性检验完成，样本大小: {len(data)}")
            return create_success_response(result, "随机性检验结果")
        
        else:
            # Unknown tool: report it along with the valid tool names.
            error_result = {
                "status": "error",
                "error_type": "UnknownTool",
                "message": f"未知的工具: {name}",
                "available_tools": ["single_sample_analysis", "two_sample_analysis", "multi_sample_analysis", "descriptive_statistics", "randomness_test"]
            }
            return [TextContent(
                type="text",
                text=json.dumps(error_result, indent=2, ensure_ascii=False)
            )]
    
    except (InvalidDataError, InsufficientDataError, StatisticsError) as e:
        # Expected validation failures: log at warning level only.
        logger.warning(f"工具调用参数错误 [{name}]: {e}")
        return create_error_response(e)
    except KeyError as e:
        # Defensive: validate_arguments should prevent this path.
        logger.error(f"工具调用缺少参数 [{name}]: {e}")
        return create_error_response(InvalidDataError(f"缺少必需参数: {str(e)}"))
    except Exception as e:
        # Last-resort guard so the server never crashes on a tool call.
        logger.error(f"工具调用未预期错误 [{name}]: {e}")
        return create_error_response(Exception(f"执行失败: {str(e)}"))

async def main():
    """Entry point: serve the statistics tools over a stdio transport."""
    import mcp.server.stdio

    # Build the initialization options up front; capabilities come from the
    # handlers already registered on the module-level server.
    init_options = InitializationOptions(
        server_name="nonparametric-statistics",
        server_version="1.0.0",
        capabilities=server.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={},
        ),
    )

    async with mcp.server.stdio.stdio_server() as streams:
        reader, writer = streams
        await server.run(reader, writer, init_options)

# Script entry point: run the asyncio event loop until the server exits.
if __name__ == "__main__":
    asyncio.run(main())