#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
大样本非参数统计MCP服务器 (n > 30)
提供基于大样本近似的单样本位置检验
完全独立实现，不依赖外部模块
"""

import asyncio
import json
import logging
import sys
from typing import Any, Dict, List
import traceback
import math

import numpy as np
import scipy.stats as stats
from scipy.stats import norm
import mcp.server.stdio
from mcp.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions
from mcp.types import Tool, TextContent

# Logging configuration.
# NOTE: this is a *stdio* MCP server — stdout carries the JSON-RPC protocol
# stream, so log output must NOT go to stdout. The original
# StreamHandler(sys.stdout) interleaved log lines with protocol messages and
# corrupted the session; route console logging to stderr instead.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('large_sample_mcp.log', encoding='utf-8'),
        logging.StreamHandler(sys.stderr)
    ]
)
logger = logging.getLogger(__name__)

# Create the MCP server instance.
server = Server("large-sample-nonparametric")

# ==================== 核心统计函数 ====================

def validate_large_sample(data):
    """Ensure the sample qualifies for large-sample (asymptotic) methods.

    Returns the sample size when n > 30; raises ValueError otherwise.
    """
    size = len(data)
    if size > 30:
        return size
    raise ValueError(f"样本量 {size} 不适合大样本近似方法，建议使用小样本精确方法")



def wilcoxon_signed_rank_large_sample(data, median_0=0):
    """Wilcoxon signed-rank test using the large-sample normal approximation.

    Args:
        data: sample observations (n must be > 30).
        median_0: hypothesized median under H0.

    Returns:
        dict with the W+ statistic, its null expectation, the continuity-
        corrected z-score and the two-sided p-value.

    Raises:
        ValueError: if the sample size is not > 30.
    """
    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    # Differences from the hypothesized median; zero differences carry no
    # sign information and are dropped (standard Wilcoxon practice).
    diff = data - median_0
    diff_nonzero = diff[diff != 0]
    n_nonzero = len(diff_nonzero)

    if n_nonzero == 0:
        return {
            'test_statistic': 0,
            'z_score': 0,
            'p_value': 1.0,
            'conclusion': '所有观测值等于假设中位数',
            'method': 'Wilcoxon符号秩检验(大样本近似)'
        }

    # Rank |differences| (rankdata assigns average ranks to ties) and sum the
    # ranks belonging to positive differences -> W+.
    ranks = stats.rankdata(np.abs(diff_nonzero))
    W_plus = np.sum(ranks[diff_nonzero > 0])

    # Null moments of W+. Var(W+) = sum(rank_i^2) / 4, which equals
    # m(m+1)(2m+1)/24 when there are no ties but remains correct when
    # average ranks are fractional (the closed-form version overstated the
    # variance under ties).
    expected_W = n_nonzero * (n_nonzero + 1) / 4
    var_W = np.sum(ranks ** 2) / 4

    # Continuity correction: shift W+ half a unit toward its expectation.
    if W_plus > expected_W:
        z_score = (W_plus - 0.5 - expected_W) / np.sqrt(var_W)
    else:
        z_score = (W_plus + 0.5 - expected_W) / np.sqrt(var_W)

    # Two-sided p-value. norm.sf avoids the catastrophic cancellation of
    # 1 - norm.cdf(|z|), which rounds to exactly 0 once |z| exceeds ~8.
    p_value = 2 * norm.sf(abs(z_score))

    return {
        'test_statistic': float(W_plus),
        'expected_value': float(expected_W),
        'z_score': float(z_score),
        'p_value': float(p_value),
        'n_nonzero': int(n_nonzero),
        'method': 'Wilcoxon符号秩检验(大样本近似)'
    }

def generalized_signed_rank_test_large_sample(data, median_0=0):
    """Generalized signed-rank test (sum of signed ranks), large-sample form.

    Args:
        data: sample observations (n must be > 30).
        median_0: hypothesized median under H0.

    Returns:
        dict with T = sum(sign_i * rank_i), its null moments, z-score and
        two-sided p-value.

    Raises:
        ValueError: if the sample size is not > 30.
    """
    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    # Zero differences are uninformative about the sign and are removed.
    diff = data - median_0
    diff_nonzero = diff[diff != 0]
    n_nonzero = len(diff_nonzero)

    if n_nonzero == 0:
        return {
            'test_statistic': 0,
            'z_score': 0,
            'p_value': 1.0,
            'conclusion': '所有观测值等于假设中位数',
            'method': '广义符号秩检验(大样本近似)'
        }

    # Signed ranks: rank of |difference| (average ranks on ties) times sign.
    abs_diff = np.abs(diff_nonzero)
    ranks = stats.rankdata(abs_diff)
    signed_ranks = ranks * np.sign(diff_nonzero)

    T = np.sum(signed_ranks)

    # Under H0, E[T] = 0 and Var(T) = sum(rank_i^2). Summing the actual
    # (possibly fractional) ranks keeps the variance correct under ties;
    # it reduces to m(m+1)(2m+1)/6 when all ranks are distinct.
    expected_T = 0
    var_T = np.sum(ranks ** 2)

    z_score = T / np.sqrt(var_T)
    # norm.sf instead of 1 - norm.cdf: the latter underflows to 0 for |z| > ~8.
    p_value = 2 * norm.sf(abs(z_score))

    return {
        'test_statistic': float(T),
        'expected_value': float(expected_T),
        'variance': float(var_T),
        'z_score': float(z_score),
        'p_value': float(p_value),
        'n_nonzero': int(n_nonzero),
        'method': '广义符号秩检验(大样本近似)'
    }

def normal_scores_test_large_sample(data, median_0=0):
    """Normal-scores (van der Waerden-type) one-sample test, large-sample form.

    Each nonzero difference gets the score Phi^{-1}((r - 0.5) / (m + 1)),
    where r is the rank of its absolute value among the m nonzero differences;
    the statistic is the sign-weighted sum of scores.

    Args:
        data: sample observations (n must be > 30).
        median_0: hypothesized median under H0.

    Returns:
        dict with the score statistic T, its null moments, z-score and
        two-sided p-value.

    Raises:
        ValueError: if the sample size is not > 30.
    """
    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    # Zero differences carry no sign information and are removed.
    diff = data - median_0
    diff_nonzero = diff[diff != 0]
    n_nonzero = len(diff_nonzero)

    if n_nonzero == 0:
        return {
            'test_statistic': 0,
            'z_score': 0,
            'p_value': 1.0,
            'conclusion': '所有观测值等于假设中位数',
            'method': '正态记分检验(大样本近似)'
        }

    # Rank the absolute differences (average ranks on ties), then map each
    # observation's rank r directly to its normal score
    # Phi^{-1}((r - 0.5) / (m + 1)). This keeps the original score set for
    # untied data while fixing two defects in the previous index-shuffling
    # version: it assigned scores to the wrong observations, and it crashed
    # with IndexError on tied (fractional average) ranks.
    ranks = stats.rankdata(np.abs(diff_nonzero))
    scores = norm.ppf((ranks - 0.5) / (n_nonzero + 1))

    # Sign-weighted sum of the per-observation scores.
    signs = np.sign(diff_nonzero)
    T = np.sum(signs * scores)

    # Under H0 the signs are i.i.d. +/-1, so E[T] = 0, Var(T) = sum(score^2).
    expected_T = 0
    var_T = np.sum(scores ** 2)

    z_score = T / np.sqrt(var_T)
    # norm.sf instead of 1 - norm.cdf: the latter underflows to 0 for |z| > ~8.
    p_value = 2 * norm.sf(abs(z_score))

    return {
        'test_statistic': float(T),
        'expected_value': float(expected_T),
        'variance': float(var_T),
        'z_score': float(z_score),
        'p_value': float(p_value),
        'n_nonzero': int(n_nonzero),
        'method': '正态记分检验(大样本近似)'
    }

def quantile_confidence_interval_large_sample(data, quantile=0.5, confidence_level=0.95):
    """Distribution-free CI for a quantile via the large-sample normal approximation.

    Args:
        data: sample observations (n must be > 30).
        quantile: target quantile, strictly between 0 and 1 (default: median).
        confidence_level: interval coverage, strictly between 0 and 1.

    Returns:
        dict with the point estimate, interval bounds and the order-statistic
        indices used.

    Raises:
        ValueError: on an out-of-range quantile/confidence_level or n <= 30.
    """
    # Validate parameters at the source (previously only the MCP handler
    # checked them, so direct callers could get nonsense indices); the
    # messages match the handler's for consistency.
    if not 0 < quantile < 1:
        raise ValueError("分位数必须在0和1之间")
    if not 0 < confidence_level < 1:
        raise ValueError("置信水平必须在0和1之间")

    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    sorted_data = np.sort(data)

    # Point estimate of the target quantile.
    point_estimate = np.quantile(data, quantile)

    # Two-sided critical value for the requested coverage.
    alpha = 1 - confidence_level
    z_alpha_2 = norm.ppf(1 - alpha / 2)

    # Sampling standard error of the empirical CDF at the target quantile.
    p = quantile
    se = np.sqrt(p * (1 - p) / n)

    # Approximate position band, clipped to [0, 1] before index conversion.
    lower_p = max(0, p - z_alpha_2 * se)
    upper_p = min(1, p + z_alpha_2 * se)

    # Convert positions to order-statistic indices; floor/ceil widen the
    # interval conservatively, clamped to valid indices.
    lower_index = max(0, int(np.floor(lower_p * n)))
    upper_index = min(n - 1, int(np.ceil(upper_p * n)))

    return {
        'quantile': quantile,
        'confidence_level': confidence_level,
        'point_estimate': float(point_estimate),
        'confidence_interval': {
            'lower': float(sorted_data[lower_index]),
            'upper': float(sorted_data[upper_index])
        },
        'lower_index': lower_index,
        'upper_index': upper_index,
        'method': '大样本近似置信区间'
    }

def cox_stuart_trend_test_large_sample(data):
    """Cox-Stuart trend test using the large-sample normal approximation.

    Pairs x_i with x_{i + n//2} and tests whether positive paired differences
    occur more (or less) often than Binomial(n_pairs, 1/2) predicts.

    Args:
        data: time-ordered observations (n must be > 30).

    Returns:
        dict with the positive-sign count, its null expectation, z-score and
        two-sided p-value.

    Raises:
        ValueError: if the sample size is not > 30.
    """
    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    # (This also guarantees the >= 4 observations Cox-Stuart needs, so the
    # old separate n < 4 check was unreachable and has been dropped.)
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    # Pair the first half with the second half; for odd n the middle
    # observation belongs to neither slice and is skipped.
    half = n // 2
    first_half = data[:half]
    second_half = data[-half:]

    # Only the signs of the paired differences matter; ties (zero
    # differences) are removed.
    differences = second_half - first_half
    signs = np.sign(differences)
    signs_nonzero = signs[signs != 0]
    n_pairs = len(signs_nonzero)

    if n_pairs == 0:
        return {
            'test_statistic': 0,
            'p_value': 1.0,
            'conclusion': '无趋势（所有配对差值为零）',
            'method': 'Cox-Stuart趋势检验(大样本近似)'
        }

    positive_count = np.sum(signs_nonzero > 0)

    # Under H0 the count is Binomial(n_pairs, 1/2).
    expected_value = n_pairs / 2
    variance = n_pairs / 4

    # Continuity correction toward the expectation.
    if positive_count > expected_value:
        z_score = (positive_count - 0.5 - expected_value) / np.sqrt(variance)
    else:
        z_score = (positive_count + 0.5 - expected_value) / np.sqrt(variance)

    # norm.sf instead of 1 - norm.cdf: the latter underflows to 0 for |z| > ~8.
    p_value = 2 * norm.sf(abs(z_score))

    return {
        'test_statistic': int(positive_count),
        'n_pairs': int(n_pairs),
        'expected_value': float(expected_value),
        'z_score': float(z_score),
        'p_value': float(p_value),
        'method': 'Cox-Stuart趋势检验(大样本近似)'
    }

def runs_test_large_sample(data):
    """Runs test for randomness (median dichotomy), large-sample approximation.

    Args:
        data: sequence of observations in their observed order (n must be > 30).

    Returns:
        dict with the run count, its null expectation, group sizes, z-score
        and two-sided p-value.

    Raises:
        ValueError: if the sample size is not > 30.
    """
    data = np.array(data)
    # Large-sample requirement, checked inline so the routine is self-contained.
    n = len(data)
    if n <= 30:
        raise ValueError(f"样本量 {n} 不适合大样本近似方法，建议使用小样本精确方法")

    # Dichotomize around the sample median; values equal to the median fall
    # into the "0" group because (x > median) is False for them.
    median = np.median(data)
    binary_sequence = (data > median).astype(int)

    # A run ends wherever two consecutive indicators differ.
    runs = 1 + int(np.count_nonzero(np.diff(binary_sequence)))

    # Sizes of the two groups.
    n_positive = int(np.sum(binary_sequence))
    n_negative = n - n_positive

    if n_positive == 0 or n_negative == 0:
        return {
            'runs': runs,
            'p_value': 1.0,
            'conclusion': '数据无变化，无法进行游程检验',
            'method': '随机游程检验(大样本近似)'
        }

    # Null moments of the run count for a two-type sequence.
    expected_runs = (2 * n_positive * n_negative) / n + 1
    variance_runs = (2 * n_positive * n_negative * (2 * n_positive * n_negative - n)) / (n**2 * (n - 1))

    # Continuity correction toward the expectation.
    if runs > expected_runs:
        z_score = (runs - 0.5 - expected_runs) / np.sqrt(variance_runs)
    else:
        z_score = (runs + 0.5 - expected_runs) / np.sqrt(variance_runs)

    # norm.sf instead of 1 - norm.cdf: the latter underflows to 0 for |z| > ~8.
    p_value = 2 * norm.sf(abs(z_score))

    return {
        'runs': int(runs),
        'expected_runs': float(expected_runs),
        'n_positive': int(n_positive),
        'n_negative': int(n_negative),
        'z_score': float(z_score),
        'p_value': float(p_value),
        'method': '随机游程检验(大样本近似)'
    }

# ==================== MCP服务器定义 ====================

@server.list_tools()
async def handle_list_tools() -> list[Tool]:
    """Advertise the available tools and their JSON input schemas.

    Every tool takes a numeric ``data`` array; the statistical tests require
    n > 30 (enforced by the underlying routines, not by the schema).
    """
    return [
        # Pre-flight helper: report whether n > 30 without running a test.
        Tool(
            name="sample_size_check",
            description="检查样本大小是否适合大样本近似检验 (严格要求 n > 30)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    }
                },
                "required": ["data"]
            }
        ),

        # Location tests against a hypothesized median.
        Tool(
            name="wilcoxon_signed_rank",
            description="Wilcoxon符号秩检验 (大样本近似，含置信区间)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    },
                    "median": {
                        "type": "number",
                        "description": "假设中位数",
                        "default": 0
                    },
                    "confidence_level": {
                        "type": "number",
                        "description": "置信水平",
                        "default": 0.95
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="generalized_signed_rank",
            description="广义符号秩检验 (大样本近似)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    },
                    "median": {
                        "type": "number",
                        "description": "假设中位数",
                        "default": 0
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="normal_scores",
            description="正态记分检验 (大样本近似，适用于近似正态分布)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    },
                    "median": {
                        "type": "number",
                        "description": "假设中位数",
                        "default": 0
                    }
                },
                "required": ["data"]
            }
        ),
        # Interval estimation for an arbitrary quantile.
        Tool(
            name="quantile_confidence_interval",
            description="分位数置信区间估计 (大样本近似)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    },
                    "quantile": {
                        "type": "number",
                        "description": "分位数 (0-1之间)",
                        "default": 0.5
                    },
                    "confidence_level": {
                        "type": "number",
                        "description": "置信水平",
                        "default": 0.95
                    }
                },
                "required": ["data"]
            }
        ),
        # Sequence diagnostics: monotone trend and randomness.
        Tool(
            name="cox_stuart_trend",
            description="Cox-Stuart趋势检验 (大样本近似)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "时间序列数据"
                    }
                },
                "required": ["data"]
            }
        ),
        Tool(
            name="runs_test",
            description="随机游程检验 (大样本近似)",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "样本数据"
                    }
                },
                "required": ["data"]
            }
        )
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> list[TextContent]:
    """Dispatch an incoming tool invocation to its handler coroutine."""
    dispatch = {
        "sample_size_check": sample_size_check,
        "wilcoxon_signed_rank": wilcoxon_signed_rank,
        "generalized_signed_rank": generalized_signed_rank,
        "normal_scores": normal_scores,
        "quantile_confidence_interval": quantile_confidence_interval,
        "cox_stuart_trend": cox_stuart_trend,
        "runs_test": runs_test_handler,
    }
    try:
        handler = dispatch.get(name)
        if handler is None:
            return [TextContent(type="text", text=f"未知工具: {name}")]
        return await handler(arguments)
    except Exception as e:
        # Handlers catch their own errors; this guards against anything else.
        logger.error(f"工具调用失败: {e}\n{traceback.format_exc()}")
        return [TextContent(type="text", text=f"工具调用失败: {str(e)}")]

# ==================== 工具处理函数 ====================

async def sample_size_check(args: Dict[str, Any]) -> list[TextContent]:
    """Report whether the sample qualifies for large-sample methods (n > 30)."""
    try:
        data = np.array(args["data"])
        n = len(data)

        # Strict threshold: asymptotic methods here require n > 30.
        suitable = n > 30
        if suitable:
            recommendation = "适合使用大样本近似检验"
            warning = None
        else:
            recommendation = f"样本量 {n} 不适合大样本近似检验，请使用小样本精确方法"
            warning = "大样本近似方法要求样本量严格大于30"

        result = {
            "sample_size": n,
            "suitable_for_large_sample": suitable,
            "recommendation": recommendation,
            "threshold": 30,
            "warning": warning
        }

        return [TextContent(type="text", text=json.dumps(result, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"样本大小检查失败: {e}")
        return [TextContent(type="text", text=f"样本大小检查失败: {str(e)}")]



async def wilcoxon_signed_rank(args: Dict[str, Any]) -> list[TextContent]:
    """Run the large-sample Wilcoxon signed-rank test plus a median CI."""
    try:
        data = np.array(args["data"])
        median = args.get("median", 0)
        confidence_level = args.get("confidence_level", 0.95)

        # Hypothesis test and a companion confidence interval for the median.
        test_result = wilcoxon_signed_rank_large_sample(data, median)
        ci_result = quantile_confidence_interval_large_sample(data, 0.5, confidence_level)

        # Decide at the significance level implied by the confidence level.
        alpha = 1 - confidence_level
        verdict = '拒绝' if test_result['p_value'] < alpha else '不拒绝'

        response = {
            "test_name": "Wilcoxon符号秩检验 (大样本近似)",
            "null_hypothesis": f"中位数 = {median}",
            "test_statistic": test_result["test_statistic"],
            "z_score": test_result["z_score"],
            "p_value": test_result["p_value"],
            "point_estimate": {
                "median": float(np.median(data))
            },
            "confidence_interval": {
                "lower": ci_result["confidence_interval"]["lower"],
                "upper": ci_result["confidence_interval"]["upper"],
                "confidence_level": confidence_level
            },
            "conclusion": f"在α={alpha}水平下{verdict}零假设",
            "method_info": "大样本近似，样本量 > 30"
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"Wilcoxon符号秩检验失败: {e}")
        return [TextContent(type="text", text=f"Wilcoxon符号秩检验失败: {str(e)}")]

async def generalized_signed_rank(args: Dict[str, Any]) -> list[TextContent]:
    """Run the generalized signed-rank test and summarize the outcome."""
    try:
        data = np.array(args["data"])
        hypothesized_median = args.get("median", 0)

        result = generalized_signed_rank_test_large_sample(data, hypothesized_median)

        # Fixed 5% significance level for the textual conclusion.
        alpha = 0.05
        verdict = '拒绝' if result['p_value'] < alpha else '不拒绝'

        # NOTE(review): when every observation equals the hypothesized median
        # the core routine's result omits 'expected_value', so this lookup
        # raises KeyError and the call is reported as a failure — preserved
        # original behavior.
        response = {
            "test_name": "广义符号秩检验 (大样本近似)",
            "null_hypothesis": f"中位数 = {hypothesized_median}",
            "test_statistic": result["test_statistic"],
            "expected_value": result["expected_value"],
            "z_score": result["z_score"],
            "p_value": result["p_value"],
            "conclusion": f"在α={alpha}水平下{verdict}零假设",
            "method_info": "基于大样本正态近似，样本量 > 30"
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"广义符号秩检验失败: {e}")
        return [TextContent(type="text", text=f"广义符号秩检验失败: {str(e)}")]

async def normal_scores(args: Dict[str, Any]) -> list[TextContent]:
    """Run the normal-scores test, warning when the data look non-normal.

    Expects in ``args``: "data" (numeric array, n > 30 enforced downstream)
    and optional "median" (hypothesized median, default 0). Returns a single
    TextContent with the JSON-serialized result.
    """
    try:
        data = np.array(args["data"])
        median = args.get("median", 0)

        # BUG FIX: the original called compute_data_characteristics(), which
        # is not defined anywhere in this module, so every invocation raised
        # NameError. Screen for approximate normality inline instead with the
        # D'Agostino-Pearson omnibus test (appropriate since n > 30 here).
        try:
            _, normality_p = stats.normaltest(data)
            is_approximately_normal = bool(normality_p >= 0.05)
        except Exception:
            # e.g. constant data; treat as "not demonstrably normal".
            is_approximately_normal = False

        if not is_approximately_normal:
            warning = "警告：数据可能不满足近似正态分布假设，建议使用广义符号秩检验"
        else:
            warning = None

        result = normal_scores_test_large_sample(data, median)

        # Fixed 5% significance level for the textual conclusion.
        alpha = 0.05
        is_significant = result['p_value'] < alpha
        conclusion = f"在α={alpha}水平下{'拒绝' if is_significant else '不拒绝'}零假设"

        response = {
            "test_name": "正态记分检验 (大样本近似)",
            "null_hypothesis": f"中位数 = {median}",
            "test_statistic": result["test_statistic"],
            "expected_value": result["expected_value"],
            "z_score": result["z_score"],
            "p_value": result["p_value"],
            "conclusion": conclusion,
            "method_info": "适用于近似正态分布的数据，变异系数 < 0.3",
            "warning": warning
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"正态记分检验失败: {e}")
        return [TextContent(type="text", text=f"正态记分检验失败: {str(e)}")]

async def quantile_confidence_interval(args: Dict[str, Any]) -> list[TextContent]:
    """Estimate a quantile with a large-sample confidence interval."""
    try:
        data = np.array(args["data"])
        quantile = args.get("quantile", 0.5)
        confidence_level = args.get("confidence_level", 0.95)

        # Reject out-of-range parameters up front; the except block below
        # turns the ValueError into a user-facing failure message.
        if not 0 < quantile < 1:
            raise ValueError("分位数必须在0和1之间")
        if not 0 < confidence_level < 1:
            raise ValueError("置信水平必须在0和1之间")

        result = quantile_confidence_interval_large_sample(data, quantile, confidence_level)

        ci = result['confidence_interval']
        response = {
            "test_name": "分位数置信区间估计 (大样本近似)",
            "quantile": quantile,
            "confidence_level": confidence_level,
            "point_estimate": result["point_estimate"],
            "confidence_interval": result["confidence_interval"],
            "interpretation": f"{quantile*100}%分位数的{confidence_level*100}%置信区间为 [{ci['lower']:.4f}, {ci['upper']:.4f}]",
            "method_info": "基于大样本正态近似，样本量 > 30"
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"分位数置信区间估计失败: {e}")
        return [TextContent(type="text", text=f"分位数置信区间估计失败: {str(e)}")]

async def cox_stuart_trend(args: Dict[str, Any]) -> list[TextContent]:
    """Run the Cox-Stuart trend test and describe any detected trend."""
    try:
        result = cox_stuart_trend_test_large_sample(np.array(args["data"]))

        # Fixed 5% significance level; direction comes from comparing the
        # positive-sign count against its null expectation.
        alpha = 0.05
        if result['p_value'] < alpha:
            direction = "上升趋势" if result['test_statistic'] > result['expected_value'] else "下降趋势"
            conclusion = f"在α={alpha}水平下检测到显著{direction}"
        else:
            conclusion = f"在α={alpha}水平下未检测到显著趋势"

        # NOTE(review): when all paired differences are zero the core routine
        # omits 'n_pairs'/'expected_value'/'z_score', so these lookups raise
        # KeyError and the call is reported as a failure — preserved original
        # behavior.
        response = {
            "test_name": "Cox-Stuart趋势检验 (大样本近似)",
            "null_hypothesis": "数据无单调趋势",
            "positive_signs": result["test_statistic"],
            "total_pairs": result["n_pairs"],
            "expected_value": result["expected_value"],
            "z_score": result["z_score"],
            "p_value": result["p_value"],
            "conclusion": conclusion,
            "method_info": "检验时间序列数据的单调趋势"
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"Cox-Stuart趋势检验失败: {e}")
        return [TextContent(type="text", text=f"Cox-Stuart趋势检验失败: {str(e)}")]

async def runs_test_handler(args: Dict[str, Any]) -> list[TextContent]:
    """Run the runs test for randomness and summarize the outcome."""
    try:
        result = runs_test_large_sample(np.array(args["data"]))

        # Fixed 5% significance level for the textual conclusion.
        alpha = 0.05
        randomness = '不是' if result['p_value'] < alpha else '是'

        # NOTE(review): for constant data the core routine omits
        # 'expected_runs'/'n_positive'/'n_negative'/'z_score', so these
        # lookups raise KeyError and the call is reported as a failure —
        # preserved original behavior.
        response = {
            "test_name": "随机游程检验 (大样本近似)",
            "null_hypothesis": "数据序列是随机的",
            "runs_count": result["runs"],
            "expected_runs": result["expected_runs"],
            "n_positive": result["n_positive"],
            "n_negative": result["n_negative"],
            "z_score": result["z_score"],
            "p_value": result["p_value"],
            "conclusion": f"在α={alpha}水平下，数据序列{randomness}随机的",
            "method_info": "检验数据序列的随机性"
        }

        return [TextContent(type="text", text=json.dumps(response, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"随机游程检验失败: {e}")
        return [TextContent(type="text", text=f"随机游程检验失败: {str(e)}")]

# ==================== 主函数 ====================

async def main():
    """Entry point: serve the MCP protocol over stdin/stdout until shutdown."""
    # stdio_server yields the paired read/write streams for the JSON-RPC session.
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="large-sample-nonparametric",
                server_version="2.0.0",
                capabilities=server.get_capabilities(
                    notification_options=NotificationOptions(),
                    experimental_capabilities={},
                ),
            ),
        )

if __name__ == "__main__":
    asyncio.run(main())