#!/usr/bin/env python3
"""
多样本位置检验 MCP 服务器 (精确检验)
提供多种非参数统计检验方法，专门针对小样本(n≤30)的精确检验
包含：Kruskal-Wallis、Jonckheere-Terpstra、Friedman、Kendall协同系数、Cochran、Page、Durbin检验
"""

import asyncio
import json
import sys
from typing import Any, Dict, List, Optional, Union, Tuple
import numpy as np
from scipy import stats
from scipy.stats import kruskal, friedmanchisquare
import itertools
import logging
from math import factorial, comb

# MCP imports
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
    Resource,
    Tool,
    TextContent,
    ImageContent,
    EmbeddedResource,
    LoggingLevel
)

# Configuration constants
SIGNIFICANCE_LEVEL = 0.05  # default significance level (alpha)
MAX_TOTAL_SAMPLE_SIZE = 30  # maximum total sample size for the "small sample" exact tests
MIN_GROUP_SIZE = 2          # minimum number of observations per group
MAX_GROUPS = 10             # maximum number of groups accepted

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Custom exception classes
class StatisticsError(Exception):
    """Base exception for all statistical-test errors in this module."""

class InsufficientDataError(StatisticsError):
    """Raised when there is too little data to run the requested test."""

class InvalidDataError(StatisticsError):
    """Raised when the supplied data contains invalid values or shapes."""

class SampleSizeTooLargeError(StatisticsError):
    """Raised when the sample size exceeds the small-sample limit."""

# MCP server instance; tools/resources are registered against this object.
server = Server("multi-sample-exact-tests")

def validate_small_sample_data(data_groups: List[List[float]]) -> None:
    """Validate multi-group small-sample data.

    Checks, in order: the group list is non-empty, there are at least two
    groups, the number of groups does not exceed MAX_GROUPS, every group is
    non-empty with at least MIN_GROUP_SIZE observations and free of NaN /
    infinite values, and the total sample size does not exceed
    MAX_TOTAL_SAMPLE_SIZE.

    Args:
        data_groups: List of groups, each a list of numeric observations.

    Raises:
        InsufficientDataError: Too few groups or observations.
        InvalidDataError: Too many groups, or NaN / infinite values present.
        SampleSizeTooLargeError: Total sample size exceeds the small-sample limit.
    """
    if not data_groups:
        raise InsufficientDataError("数据组不能为空")

    if len(data_groups) < 2:
        raise InsufficientDataError("至少需要2组数据")

    if len(data_groups) > MAX_GROUPS:
        raise InvalidDataError(f"数据组数({len(data_groups)})超过最大限制({MAX_GROUPS})")

    total = 0
    for idx, group in enumerate(data_groups, start=1):
        if not group:
            raise InsufficientDataError(f"组 {idx} 数据不能为空")

        if len(group) < MIN_GROUP_SIZE:
            raise InsufficientDataError(f"组 {idx} 数据量({len(group)})小于最小要求({MIN_GROUP_SIZE})")

        total += len(group)

        values = np.asarray(group)
        # NaN is checked before inf so the reported error matches the
        # first failure mode, as in the rest of the module.
        if np.isnan(values).any():
            raise InvalidDataError(f"组 {idx} 包含NaN值")

        if np.isinf(values).any():
            raise InvalidDataError(f"组 {idx} 包含无穷大值")

    if total > MAX_TOTAL_SAMPLE_SIZE:
        raise SampleSizeTooLargeError(
            f"总样本量({total})超过小样本限制({MAX_TOTAL_SAMPLE_SIZE})，请使用近似检验"
        )

def validate_paired_data(data_matrix: List[List[float]]) -> None:
    """Validate a paired-design data matrix.

    Each row is one subject, each column one treatment. Checks that the
    matrix is non-empty, has between 2 and MAX_TOTAL_SAMPLE_SIZE subjects,
    has at least 2 treatments, that every row has the same length, and that
    no row contains NaN or infinite values.

    Args:
        data_matrix: Paired data; one row per subject, one column per treatment.

    Raises:
        InsufficientDataError: Too few subjects or treatments.
        InvalidDataError: Ragged rows, or NaN / infinite values present.
        SampleSizeTooLargeError: Too many subjects for the small-sample tests.
    """
    if not data_matrix:
        raise InsufficientDataError("数据矩阵不能为空")

    n_subjects = len(data_matrix)
    if n_subjects < 2:
        raise InsufficientDataError("至少需要2个受试者")

    if n_subjects > MAX_TOTAL_SAMPLE_SIZE:
        raise SampleSizeTooLargeError(
            f"受试者数量({n_subjects})超过小样本限制({MAX_TOTAL_SAMPLE_SIZE})，请使用近似检验"
        )

    n_treatments = len(data_matrix[0]) if data_matrix else 0
    if n_treatments < 2:
        raise InsufficientDataError("至少需要2个处理")

    # Row-by-row: consistent width, then value validity.
    for row_no, row in enumerate(data_matrix, start=1):
        if len(row) != n_treatments:
            raise InvalidDataError(f"受试者 {row_no} 的数据长度不一致")

        values = np.asarray(row)
        if np.isnan(values).any():
            raise InvalidDataError(f"受试者 {row_no} 包含NaN值")

        if np.isinf(values).any():
            raise InvalidDataError(f"受试者 {row_no} 包含无穷大值")

class MultiSampleExactTests:
    """多样本位置检验类（精确检验）"""
    
    @staticmethod
    def kruskal_wallis_exact(data_groups: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Kruskal-Wallis rank-sum test (small-sample "exact" variant).

        Tests whether several independent samples come from populations with
        the same distribution. Assumptions:
        1. The groups are mutually independent.
        2. Data are at least ordinal.
        3. Group distributions share the same shape (differ only in location).

        For total n <= 15 the p-value is approximated by Monte Carlo
        permutation of the pooled ranks (fixed seed 42); larger samples use
        the chi-square approximation with k-1 degrees of freedom.

        Args:
            data_groups: The independent samples, one list per group.
            alpha: Significance level.

        Returns:
            Dict with the H statistic, p-value, eta-squared effect size,
            per-group rank sums and a verbal conclusion.
        """
        validate_small_sample_data(data_groups)

        # Pool all observations; groups occupy consecutive slices of the pool.
        all_data = []
        group_labels = []
        for i, group in enumerate(data_groups):
            all_data.extend(group)
            group_labels.extend([i] * len(group))

        # Mid-ranks over the pooled sample (ties receive average ranks).
        ranks = stats.rankdata(all_data)

        k = len(data_groups)
        n_i = [len(group) for group in data_groups]
        total_n = sum(n_i)

        # Per-group rank sums.
        rank_sums = []
        start_idx = 0
        for i in range(k):
            end_idx = start_idx + n_i[i]
            rank_sums.append(sum(ranks[start_idx:end_idx]))
            start_idx = end_idx

        # H = 12/(N(N+1)) * sum(R_i^2 / n_i) - 3(N+1).
        H = 0
        for i in range(k):
            H += (rank_sums[i] ** 2) / n_i[i]
        H = (12 / (total_n * (total_n + 1))) * H - 3 * (total_n + 1)

        # Tie correction: divide H by 1 - sum(t^3 - t) / (N^3 - N).
        from collections import Counter
        ties = Counter(all_data)
        tie_correction = sum(t**3 - t for t in ties.values() if t > 1)
        if tie_correction > 0:
            H = H / (1 - tie_correction / (total_n**3 - total_n))

        def calculate_exact_p_value():
            """Return (p_value, is_exact)."""
            if total_n > 15:  # larger samples: chi-square approximation
                df = k - 1
                p_value = 1 - stats.chi2.cdf(H, df)
                return p_value, False

            observed_H = H
            count_extreme = 0
            total_permutations = 0

            # Monte Carlo permutation of the pooled ranks; fixed seed for
            # reproducible p-values.
            np.random.seed(42)
            # Number of distinct group assignments is the multinomial
            # coefficient N! / prod(n_i!). The previous code only computed
            # this for k == 2 and divided by a constant 1000 otherwise,
            # which underflowed to 0 simulations for small k > 2 designs
            # (e.g. three groups of 2: 720 // 1000 == 0), silently reporting
            # p = 1.0 regardless of the data. A floor of 200 simulations
            # guarantees a usable estimate.
            n_distinct = factorial(total_n)
            for size in n_i:
                n_distinct //= factorial(size)
            n_simulations = min(10000, max(200, n_distinct))

            for _ in range(n_simulations):
                # Shuffle ranks = randomly reassign observations to groups.
                shuffled_ranks = np.random.permutation(ranks)

                sim_rank_sums = []
                start_idx = 0
                for i in range(k):
                    end_idx = start_idx + n_i[i]
                    sim_rank_sums.append(sum(shuffled_ranks[start_idx:end_idx]))
                    start_idx = end_idx

                sim_H = 0
                for i in range(k):
                    sim_H += (sim_rank_sums[i] ** 2) / n_i[i]
                sim_H = (12 / (total_n * (total_n + 1))) * sim_H - 3 * (total_n + 1)

                # Same tie correction as for the observed statistic.
                if tie_correction > 0:
                    sim_H = sim_H / (1 - tie_correction / (total_n**3 - total_n))

                if sim_H >= observed_H:
                    count_extreme += 1
                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0
            return p_value, True

        try:
            p_value, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Fall back to the chi-square approximation on any failure.
            df = k - 1
            p_value = 1 - stats.chi2.cdf(H, df)
            is_exact = False
            logger.warning(f"精确p值计算失败，使用卡方近似: {str(e)}")

        # Effect size: eta-squared = (H - k + 1) / (N - k), clamped at 0 below.
        eta_squared = (H - k + 1) / (total_n - k) if total_n > k else 0

        result = {
            "test_name": "Kruskal-Wallis 秩和检验（精确）",
            "statistic": float(H),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "eta_squared": float(max(0, eta_squared)),
            "is_exact": bool(is_exact),
            "conclusion": "拒绝原假设：各组分布存在显著差异" if p_value < alpha else "接受原假设：各组分布无显著差异",
            "sample_sizes": n_i,
            "total_sample_size": int(total_n),
            "rank_sums": [float(x) for x in rank_sums]
        }

        logger.info(f"Kruskal-Wallis检验完成: H={H:.4f}, p={p_value:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def jonckheere_terpstra_exact(data_groups: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Jonckheere-Terpstra trend test for ordered independent groups.

        Tests for a monotone trend across several ordered groups. Assumptions:
        1. The groups are mutually independent.
        2. The groups have a natural ordering (the order of `data_groups`).
        3. The alternative is a monotone shift in location.

        NOTE(review): despite the "exact" name, the p-value here always comes
        from the two-sided normal approximation of J, never from an exact
        null distribution — confirm whether this is intended.

        Args:
            data_groups: Groups given in their natural order.
            alpha: Significance level.

        Returns:
            Dict with the J statistic, its null mean and variance, the z
            value, p-value and a verbal conclusion.

        Raises:
            InsufficientDataError: Fewer than 3 groups.
        """
        validate_small_sample_data(data_groups)

        k = len(data_groups)
        if k < 3:
            raise InsufficientDataError("Jonckheere-Terpstra检验至少需要3组数据")

        # J = sum over ordered group pairs (i < j) of the Mann-Whitney count
        # of cross-group pairs with x < y; exact ties contribute 0.5 each.
        J = 0
        for i in range(k):
            for j in range(i + 1, k):
                for x in data_groups[i]:
                    for y in data_groups[j]:
                        if x < y:
                            J += 1
                        elif x == y:
                            J += 0.5

        # Null mean and variance of J.
        n_total = sum(len(group) for group in data_groups)
        n_i = [len(group) for group in data_groups]

        # E[J] = sum_{i<j} n_i * n_j / 2.
        E_J = 0
        for i in range(k):
            for j in range(i + 1, k):
                E_J += n_i[i] * n_i[j] / 2

        # Pool the data to count tied values for the variance adjustment.
        all_data = []
        group_labels = []
        for i, group in enumerate(data_groups):
            all_data.extend(group)
            group_labels.extend([i] * len(group))

        # Tie term: sum of t(t-1)(t+1) over tied-value multiplicities t.
        from collections import Counter
        ties = Counter(all_data)
        tie_correction = sum(t * (t - 1) * (t + 1) for t in ties.values() if t > 1)

        # Base variance: sum_{i<j} n_i * n_j * (N + 1) / 12 ...
        var_J = 0
        for i in range(k):
            for j in range(i + 1, k):
                var_J += n_i[i] * n_i[j] * (n_total + 1) / 12

        # ... minus a tie adjustment.
        # NOTE(review): this is a simplified adjustment; the full JT tie
        # variance has additional terms — verify against a reference before
        # relying on tied-data p-values.
        var_J -= tie_correction * sum(n_i[i] * n_i[j] for i in range(k) for j in range(i + 1, k)) / (12 * n_total * (n_total - 1))

        # Standardize J and take a two-sided normal p-value.
        if var_J > 0:
            z_statistic = (J - E_J) / np.sqrt(var_J)
            p_value = 2 * (1 - stats.norm.cdf(abs(z_statistic)))  # two-sided
        else:
            z_statistic = 0
            p_value = 1.0

        result = {
            "test_name": "Jonckheere-Terpstra 趋势检验（精确）",
            "J_statistic": float(J),
            "expected_J": float(E_J),
            "variance_J": float(var_J),
            "z_statistic": float(z_statistic),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "conclusion": "拒绝原假设：存在显著单调趋势" if p_value < alpha else "接受原假设：无显著单调趋势",
            "sample_sizes": [len(group) for group in data_groups],
            "total_sample_size": int(n_total)
        }

        logger.info(f"Jonckheere-Terpstra检验完成: J={J}, z={z_statistic:.4f}, p={p_value:.4f}")
        return result
    
    @staticmethod
    def friedman_exact(data_matrix: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Friedman rank-sum test (small-sample "exact" variant).

        Tests whether several treatments have the same effect in a
        repeated-measures (paired) design. Assumptions:
        1. Data form a paired design (each subject receives every treatment).
        2. Data are at least ordinal.
        3. Treatment-effect distributions share the same shape.

        For small designs (<=10 subjects and <=6 treatments) the p-value is
        approximated by Monte Carlo permutation of within-subject ranks
        (fixed seed 42); otherwise the chi-square approximation is used.

        Args:
            data_matrix: One row per subject, one column per treatment.
            alpha: Significance level.

        Returns:
            Dict with the Q statistic, p-value, Kendall's W effect size and
            a verbal conclusion.
        """
        validate_paired_data(data_matrix)

        data_array = np.array(data_matrix)
        n_subjects, n_treatments = data_array.shape

        # Rank treatments within each subject. Force a float dtype: with
        # integer input, np.zeros_like inherited an int dtype and truncated
        # tied mid-ranks (x.5), corrupting the statistic.
        ranks = np.zeros(data_array.shape, dtype=float)
        for i in range(n_subjects):
            ranks[i, :] = stats.rankdata(data_array[i, :])

        # Column (treatment) rank sums.
        rank_sums = np.sum(ranks, axis=0)

        # Friedman statistic: 12/(nk(k+1)) * sum(R_j^2) - 3n(k+1).
        Q = (12 / (n_subjects * n_treatments * (n_treatments + 1))) * \
            np.sum(rank_sums**2) - 3 * n_subjects * (n_treatments + 1)

        # Tie correction: sum of (t^3 - t) over tied values within each row.
        from collections import Counter
        tie_correction = 0
        for i in range(n_subjects):
            ties = Counter(data_array[i, :])
            tie_correction += sum(t**3 - t for t in ties.values() if t > 1)

        if tie_correction > 0:
            correction_factor = 1 - tie_correction / (n_subjects * (n_treatments**3 - n_treatments))
            Q = Q / correction_factor

        def calculate_exact_p_value():
            """Return (p_value, is_exact)."""
            # Larger designs: chi-square approximation with k-1 df.
            if n_subjects > 10 or n_treatments > 6:
                df = n_treatments - 1
                p_value = 1 - stats.chi2.cdf(Q, df)
                return p_value, False

            observed_Q = Q
            count_extreme = 0
            total_permutations = 0

            # Monte Carlo permutation: re-rank each subject at random.
            np.random.seed(42)
            # Floor of 200 simulations: the previous formula
            # factorial(k)**n // 1000 underflowed to 0 for tiny designs
            # (e.g. 2 subjects x 2 treatments: 4 // 1000 == 0), running zero
            # simulations and reporting p = 1.0 regardless of the data.
            n_simulations = min(10000, max(200, factorial(n_treatments)**n_subjects // 1000))

            for _ in range(n_simulations):
                sim_ranks = np.zeros((n_subjects, n_treatments))
                for i in range(n_subjects):
                    # Random within-subject rank permutation (ties are
                    # ignored under the simulated null).
                    sim_ranks[i, :] = np.random.permutation(range(1, n_treatments + 1))

                sim_rank_sums = np.sum(sim_ranks, axis=0)
                sim_Q = (12 / (n_subjects * n_treatments * (n_treatments + 1))) * \
                        np.sum(sim_rank_sums**2) - 3 * n_subjects * (n_treatments + 1)

                if sim_Q >= observed_Q:
                    count_extreme += 1
                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0
            return p_value, True

        try:
            p_value, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Fall back to the chi-square approximation on any failure.
            df = n_treatments - 1
            p_value = 1 - stats.chi2.cdf(Q, df)
            is_exact = False
            logger.warning(f"精确p值计算失败，使用卡方近似: {str(e)}")

        # Effect size: Kendall's W = S / max(S) with tie-adjusted denominator
        # (m^2(k^3 - k) - m*T) / 12, matching kendall_coefficient_concordance;
        # the previous code omitted the factor m on the tie term.
        mean_rank_sum = np.mean(rank_sums)
        ss_rank_sums = np.sum((rank_sums - mean_rank_sum) ** 2)
        max_possible_ss = (n_subjects**2 * (n_treatments**3 - n_treatments) - n_subjects * tie_correction) / 12
        kendalls_w = ss_rank_sums / max_possible_ss if max_possible_ss > 0 else 0

        result = {
            "test_name": "Friedman 秩和检验（精确）",
            "statistic": float(Q),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "kendalls_w": float(kendalls_w),
            "is_exact": bool(is_exact),
            "conclusion": "拒绝原假设：各处理效应存在显著差异" if p_value < alpha else "接受原假设：各处理效应无显著差异",
            "n_subjects": int(n_subjects),
            "n_treatments": int(n_treatments),
            "rank_sums": [float(x) for x in rank_sums.tolist()]
        }

        logger.info(f"Friedman检验完成: Q={Q:.4f}, p={p_value:.4f}, W={kendalls_w:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def kendall_coefficient_concordance(data_matrix: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Kendall coefficient of concordance (W) test.

        Tests the agreement of several raters scoring the same set of
        objects. Assumptions:
        1. Every rater scores every object.
        2. Scores are at least ordinal.
        3. The null hypothesis is "no agreement between raters".

        For small problems (<=10 objects and <=10 raters) the p-value is
        approximated by Monte Carlo permutation of each rater's ranking
        (fixed seed 42); otherwise the chi-square approximation is used.

        Args:
            data_matrix: One row per object, one column per rater.
            alpha: Significance level.

        Returns:
            Dict with W, the chi-square statistic, p-value, a verbal
            consistency level and conclusion.
        """
        validate_paired_data(data_matrix)

        data_array = np.array(data_matrix)
        n_objects, n_raters = data_array.shape

        # Rank objects within each rater (column). Force a float dtype so
        # tied mid-ranks (x.5) are not truncated for integer-valued input
        # (np.zeros_like previously inherited an int dtype).
        ranks = np.zeros(data_array.shape, dtype=float)
        for j in range(n_raters):
            ranks[:, j] = stats.rankdata(data_array[:, j])

        # Row (object) rank sums across raters.
        rank_sums = np.sum(ranks, axis=1)

        # S = sum of squared deviations of rank sums from their mean.
        mean_rank_sum = np.mean(rank_sums)
        ss_rank_sums = np.sum((rank_sums - mean_rank_sum) ** 2)

        # Tie correction T = sum over raters of sum(t^3 - t) for tied values.
        # Count multiplicities of the raw column values directly (same
        # approach as friedman_exact); the previous bincount over truncated
        # average ranks miscounted ties for non-integer data.
        from collections import Counter
        tie_correction = 0
        for j in range(n_raters):
            ties = Counter(data_array[:, j])
            tie_correction += sum(t**3 - t for t in ties.values() if t > 1)

        # W = 12S / (m^2(n^3 - n) - m*T).
        max_possible_ss = n_raters**2 * (n_objects**3 - n_objects) / 12 - n_raters * tie_correction / 12
        W = ss_rank_sums / max_possible_ss if max_possible_ss > 0 else 0

        # Chi-square statistic m(n-1)W with n-1 degrees of freedom.
        chi2_statistic = n_raters * (n_objects - 1) * W
        df = n_objects - 1

        def calculate_exact_p_value():
            """Return (p_value, is_exact)."""
            if n_objects > 10 or n_raters > 10:
                # Larger problems: chi-square approximation.
                p_value = 1 - stats.chi2.cdf(chi2_statistic, df)
                return p_value, False

            observed_W = W
            count_extreme = 0
            total_permutations = 0

            # Monte Carlo permutation of each rater's ranking.
            np.random.seed(42)
            # Floor of 200 simulations: factorial(n)**m // 1000 underflowed
            # to 0 for tiny problems, running zero simulations and reporting
            # p = 1.0 regardless of the data.
            n_simulations = min(5000, max(200, factorial(n_objects)**n_raters // 1000))

            for _ in range(n_simulations):
                sim_ranks = np.zeros((n_objects, n_raters))
                for j in range(n_raters):
                    sim_ranks[:, j] = np.random.permutation(range(1, n_objects + 1))

                sim_rank_sums = np.sum(sim_ranks, axis=1)

                # Simulated W (no ties under the simulated null).
                sim_mean_rank_sum = np.mean(sim_rank_sums)
                sim_ss_rank_sums = np.sum((sim_rank_sums - sim_mean_rank_sum) ** 2)
                sim_max_possible_ss = n_raters**2 * (n_objects**3 - n_objects) / 12
                sim_W = sim_ss_rank_sums / sim_max_possible_ss if sim_max_possible_ss > 0 else 0

                if sim_W >= observed_W:
                    count_extreme += 1
                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0
            return p_value, True

        try:
            p_value, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Fall back to the chi-square approximation on any failure.
            p_value = 1 - stats.chi2.cdf(chi2_statistic, df)
            is_exact = False
            logger.warning(f"精确p值计算失败，使用卡方近似: {str(e)}")

        # Verbal interpretation of W (conventional cut-offs).
        if W >= 0.7:
            consistency_level = "强一致性"
        elif W >= 0.5:
            consistency_level = "中等一致性"
        elif W >= 0.3:
            consistency_level = "弱一致性"
        else:
            consistency_level = "几乎无一致性"

        result = {
            "test_name": "Kendall 协同系数检验（精确）",
            "kendalls_w": float(W),
            "chi2_statistic": float(chi2_statistic),
            "degrees_of_freedom": int(df),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "is_exact": bool(is_exact),
            "consistency_level": consistency_level,
            "conclusion": "拒绝原假设：评价者之间存在显著一致性" if p_value < alpha else "接受原假设：评价者之间无显著一致性",
            "n_objects": int(n_objects),
            "n_raters": int(n_raters),
            "rank_sums": [float(x) for x in rank_sums.tolist()]
        }

        logger.info(f"Kendall协同系数检验完成: W={W:.4f}, χ²={chi2_statistic:.4f}, p={p_value:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def cochran_q_test(data_matrix: List[List[int]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Cochran Q test (small-sample "exact" variant).

        Tests whether several binary (0/1) treatments have the same success
        rate in a paired design. Assumptions:
        1. Data form a paired design.
        2. The response is dichotomous (0/1).
        3. At least 3 treatments.

        For small designs (<=15 subjects and <=6 treatments) the p-value is
        approximated by Monte Carlo permutation that keeps each subject's
        total number of successes fixed (fixed seed 42); otherwise the
        chi-square approximation is used.

        Args:
            data_matrix: One row per subject, one column per treatment (0/1).
            alpha: Significance level.

        Returns:
            Dict with the Q statistic, p-value, per-treatment success rates
            and a verbal conclusion.

        Raises:
            InsufficientDataError: Fewer than 3 treatments.
            InvalidDataError: Data not strictly 0/1.
            StatisticsError: Degenerate data (zero denominator).
        """
        validate_paired_data(data_matrix)

        data_array = np.array(data_matrix, dtype=int)
        n_subjects, n_treatments = data_array.shape

        if n_treatments < 3:
            raise InsufficientDataError("Cochran Q检验至少需要3个处理")

        # The response must be strictly binary.
        if not np.all(np.isin(data_array, [0, 1])):
            raise InvalidDataError("Cochran Q检验要求数据为二分类（0或1）")

        # Column totals (successes per treatment) and row totals (per subject).
        treatment_sums = np.sum(data_array, axis=0)
        subject_sums = np.sum(data_array, axis=1)
        total_successes = np.sum(treatment_sums)

        # Q = (k-1)(k*sum(C_j^2) - N^2) / (k*N - sum(R_i^2)).
        numerator = (n_treatments - 1) * (n_treatments * np.sum(treatment_sums**2) - total_successes**2)
        denominator = n_treatments * total_successes - np.sum(subject_sums**2)

        if denominator == 0:
            # Every row is constant (all 0s or all 1s): Q is undefined.
            raise StatisticsError("Cochran Q检验计算失败：分母为零")

        Q = numerator / denominator

        def calculate_exact_p_value():
            """Return (p_value, is_exact)."""
            if n_subjects > 15 or n_treatments > 6:
                # Larger designs: chi-square approximation with k-1 df.
                df = n_treatments - 1
                p_value = 1 - stats.chi2.cdf(Q, df)
                return p_value, False

            observed_Q = Q
            count_extreme = 0
            total_permutations = 0

            # Monte Carlo permutation conditioned on the row totals.
            np.random.seed(42)
            # Floor of 200 simulations: the previous 2**(n*k) // 1000
            # underflowed to 0 for small designs (e.g. 2 subjects x 3
            # treatments: 64 // 1000 == 0), running zero simulations and
            # reporting p = 1.0 regardless of the data.
            n_simulations = min(5000, max(200, 2**(n_subjects * n_treatments) // 1000))

            for _ in range(n_simulations):
                # Redistribute each subject's successes uniformly at random
                # across treatments, keeping the subject's total fixed.
                sim_data = np.zeros((n_subjects, n_treatments), dtype=int)
                for i in range(n_subjects):
                    subject_total = subject_sums[i]
                    if subject_total > 0 and subject_total < n_treatments:
                        success_positions = np.random.choice(n_treatments, subject_total, replace=False)
                        sim_data[i, success_positions] = 1
                    elif subject_total == n_treatments:
                        sim_data[i, :] = 1

                sim_treatment_sums = np.sum(sim_data, axis=0)
                sim_subject_sums = np.sum(sim_data, axis=1)
                sim_total_successes = np.sum(sim_treatment_sums)

                if sim_total_successes > 0:
                    sim_numerator = (n_treatments - 1) * (n_treatments * np.sum(sim_treatment_sums**2) - sim_total_successes**2)
                    sim_denominator = n_treatments * sim_total_successes - np.sum(sim_subject_sums**2)

                    if sim_denominator > 0:
                        sim_Q = sim_numerator / sim_denominator
                        if sim_Q >= observed_Q:
                            count_extreme += 1

                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0
            return p_value, True

        try:
            p_value, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Fall back to the chi-square approximation on any failure.
            df = n_treatments - 1
            p_value = 1 - stats.chi2.cdf(Q, df)
            is_exact = False
            logger.warning(f"精确p值计算失败，使用卡方近似: {str(e)}")

        # NOTE(review): Q / (n*k - 1) is an ad-hoc effect-size measure, not a
        # standard eta-squared definition — verify before relying on it.
        eta_squared = Q / (n_subjects * n_treatments - 1) if (n_subjects * n_treatments - 1) > 0 else 0

        result = {
            "test_name": "Cochran Q 检验（精确）",
            "Q_statistic": float(Q),
            "degrees_of_freedom": int(n_treatments - 1),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "eta_squared": float(eta_squared),
            "is_exact": bool(is_exact),
            "conclusion": "拒绝原假设：各处理成功率存在显著差异" if p_value < alpha else "接受原假设：各处理成功率无显著差异",
            "n_subjects": int(n_subjects),
            "n_treatments": int(n_treatments),
            "treatment_success_rates": [float(x) for x in (treatment_sums / n_subjects).tolist()],
            "total_successes": int(total_successes)
        }

        logger.info(f"Cochran Q检验完成: Q={Q:.4f}, p={p_value:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def page_test(data_matrix: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Page trend test for ordered treatments in a paired design.

        Tests for a monotone increasing trend over ordered treatments.
        Assumptions:
        1. Data form a paired design.
        2. Treatments have a natural order (the column order).
        3. The alternative is an increasing trend; the test is one-sided
           (upper tail).

        For small designs (<=12 subjects and <=6 treatments) the p-value is
        approximated by Monte Carlo permutation of within-subject ranks
        (fixed seed 42); otherwise the normal approximation is used.

        Args:
            data_matrix: One row per subject, one column per ordered treatment.
            alpha: Significance level.

        Returns:
            Dict with the L statistic, its null mean/variance, z value,
            p-value and a verbal conclusion.

        Raises:
            InsufficientDataError: Fewer than 3 treatments.
        """
        validate_paired_data(data_matrix)

        data_array = np.array(data_matrix)
        n_subjects, n_treatments = data_array.shape

        if n_treatments < 3:
            raise InsufficientDataError("Page检验至少需要3个处理")

        # Rank treatments within each subject. Force a float dtype so tied
        # mid-ranks (x.5) are not truncated for integer-valued input
        # (np.zeros_like previously inherited an int dtype).
        ranks = np.zeros(data_array.shape, dtype=float)
        for i in range(n_subjects):
            ranks[i, :] = stats.rankdata(data_array[i, :])

        # L = sum_j j * R_j with weights 1..k over the column rank sums.
        weights = np.arange(1, n_treatments + 1)
        rank_sums = np.sum(ranks, axis=0)
        L = np.sum(weights * rank_sums)

        # Null mean: E[L] = n * k * (k+1)^2 / 4.
        E_L = n_subjects * n_treatments * (n_treatments + 1)**2 / 4

        # Tie term: sum over subjects of sum(t^3 - t) for tied row values.
        # Count multiplicities of the raw values directly; the previous
        # bincount over truncated average ranks miscounted ties for
        # non-integer data.
        from collections import Counter
        tie_correction = 0
        for i in range(n_subjects):
            ties = Counter(data_array[i, :])
            tie_correction += sum(t**3 - t for t in ties.values() if t > 1)

        # Null variance without ties: Var(L) = n * k^2 (k+1)^2 (k-1) / 144.
        # The previous formula used (k+1)(k+2) in place of (k+1)(k-1):
        # e.g. for k = 3 the exact variance is 2n, but the old code gave 5n/3.
        var_L = n_subjects * n_treatments**2 * (n_treatments + 1)**2 * (n_treatments - 1) / 144
        # NOTE(review): approximate tie adjustment. tie_correction already
        # sums over subjects, so it must not be multiplied by n_subjects
        # again (the old code double-counted it). Verify the exact
        # tie-adjusted variance against a reference if ties are common.
        var_L -= tie_correction * (n_treatments + 1) / 144

        def calculate_exact_p_value():
            """Return (p_value, z_statistic, is_exact)."""
            if n_subjects > 12 or n_treatments > 6:
                # Larger designs: one-sided (upper-tail) normal approximation.
                if var_L > 0:
                    z_statistic = (L - E_L) / np.sqrt(var_L)
                    p_value = 1 - stats.norm.cdf(z_statistic)
                else:
                    z_statistic = 0
                    p_value = 0.5
                return p_value, z_statistic, False

            observed_L = L
            count_extreme = 0
            total_permutations = 0

            # Monte Carlo permutation of within-subject ranks.
            np.random.seed(42)
            # Floor of 200 simulations: factorial(k)**n // 1000 underflowed
            # to 0 for tiny designs, running zero simulations and reporting
            # p = 1.0 regardless of the data.
            n_simulations = min(5000, max(200, factorial(n_treatments)**n_subjects // 1000))

            for _ in range(n_simulations):
                sim_ranks = np.zeros((n_subjects, n_treatments))
                for i in range(n_subjects):
                    sim_ranks[i, :] = np.random.permutation(range(1, n_treatments + 1))

                sim_rank_sums = np.sum(sim_ranks, axis=0)
                sim_L = np.sum(weights * sim_rank_sums)

                # Upper tail: large L indicates an increasing trend.
                if sim_L >= observed_L:
                    count_extreme += 1
                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0

            # z statistic reported for reference alongside the MC p-value.
            if var_L > 0:
                z_statistic = (L - E_L) / np.sqrt(var_L)
            else:
                z_statistic = 0

            return p_value, z_statistic, True

        try:
            p_value, z_statistic, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Fall back to the normal approximation on any failure.
            if var_L > 0:
                z_statistic = (L - E_L) / np.sqrt(var_L)
                p_value = 1 - stats.norm.cdf(z_statistic)
            else:
                z_statistic = 0
                p_value = 0.5
            is_exact = False
            logger.warning(f"精确p值计算失败，使用正态近似: {str(e)}")

        result = {
            "test_name": "Page 趋势检验（精确）",
            "L_statistic": float(L),
            "expected_L": float(E_L),
            "variance_L": float(var_L),
            "z_statistic": float(z_statistic),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "is_exact": bool(is_exact),
            "conclusion": "拒绝原假设：存在显著上升趋势" if p_value < alpha else "接受原假设：无显著上升趋势",
            "n_subjects": int(n_subjects),
            "n_treatments": int(n_treatments),
            "rank_sums": [float(x) for x in rank_sums.tolist()],
            "weights": [int(x) for x in weights.tolist()]
        }

        logger.info(f"Page检验完成: L={L}, z={z_statistic:.4f}, p={p_value:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def durbin_test(data_matrix: List[List[float]], alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Durbin test (exact / Monte Carlo version).

        Tests whether several treatment effects are identical in an
        incomplete block design (blocks need not contain every treatment).

        Applicability:
        1. Incomplete block designs (missing cells allowed)
        2. Data at least on an ordinal scale
        3. Treatment-effect distributions of similar shape

        Args:
            data_matrix: data matrix (rows = blocks, columns = treatments);
                missing values encoded as None or np.nan
            alpha: significance level

        Returns:
            Dict with the D statistic, p-value, conclusion and bookkeeping info.

        Raises:
            InsufficientDataError: fewer than 2 blocks or 3 treatments
            InvalidDataError: some treatment has no observations at all
            SampleSizeTooLargeError: observations exceed MAX_TOTAL_SAMPLE_SIZE
        """
        # None entries become np.nan under dtype=float.
        data_array = np.array(data_matrix, dtype=float)
        n_blocks, n_treatments = data_array.shape

        if n_blocks < 2:
            raise InsufficientDataError("Durbin检验至少需要2个区组")
        if n_treatments < 3:
            raise InsufficientDataError("Durbin检验至少需要3个处理")

        # Enforce the small-sample limit on the number of non-missing cells.
        total_observations = np.sum(~np.isnan(data_array))
        if total_observations > MAX_TOTAL_SAMPLE_SIZE:
            raise SampleSizeTooLargeError(
                f"总观测数({total_observations})超过小样本限制({MAX_TOTAL_SAMPLE_SIZE})，请使用近似检验"
            )

        # Rank observations within each block, ignoring missing cells.
        # Blocks with fewer than 2 valid observations contribute no ranks.
        ranks = np.full_like(data_array, np.nan)
        for i in range(n_blocks):
            block_data = data_array[i, :]
            valid_indices = ~np.isnan(block_data)
            if np.sum(valid_indices) >= 2:
                ranks[i, valid_indices] = stats.rankdata(block_data[valid_indices])

        # Rank sum R_j and replication count r_j for each treatment.
        rank_sums = np.nansum(ranks, axis=0)
        treatment_counts = np.sum(~np.isnan(ranks), axis=0)

        # A treatment that was never ranked would put a 0 in the divisor below
        # and silently turn D into NaN; fail loudly instead.
        if np.any(treatment_counts == 0):
            raise InvalidDataError("存在从未被观测（或无法排秩）的处理，无法计算Durbin统计量")

        # Total ranked observations.  BUG FIX: the original code assigned this
        # only AFTER calling calculate_exact_p_value(), so the closure raised
        # NameError and the exact path always fell back to the chi-square
        # approximation.  It must be available before the p-value computation.
        total_valid_observations = int(np.sum(treatment_counts))

        # Durbin statistic:
        #   D = [12 / (b*k*(k+1))] * Σ(R_j² / r_j) − 3*b*(k+1)
        # with b = blocks, k = treatments.  For a complete design (r_j = b for
        # every j) this reduces to the Friedman chi-square statistic.
        sum_R_squared_over_r = np.sum(rank_sums**2 / treatment_counts)
        D = (12 / (n_blocks * n_treatments * (n_treatments + 1))) * sum_R_squared_over_r - 3 * n_blocks * (n_treatments + 1)

        def calculate_exact_p_value() -> Tuple[float, bool]:
            """Return (p_value, is_exact)."""
            if total_valid_observations > 20 or n_treatments > 6:
                # Design too large for permutation work: chi-square approximation.
                df = n_treatments - 1
                return 1 - stats.chi2.cdf(D, df), False

            # Monte Carlo approximation of the exact permutation distribution:
            # independently permute the within-block ranks.
            observed_D = D
            count_extreme = 0
            total_permutations = 0

            np.random.seed(42)  # reproducible p-values across calls
            # BUG FIX: the original `min(3000, perms // 2000)` evaluated to 0
            # for small designs (e.g. 3 treatments × 2 blocks → 36 // 2000 = 0),
            # which made p_value unconditionally 1.0.  Enforce a floor.
            n_simulations = min(3000, max(500, factorial(n_treatments)**n_blocks // 2000))

            for _ in range(n_simulations):
                # Randomly permute ranks inside each block's valid cells.
                sim_ranks = np.full_like(data_array, np.nan)
                for i in range(n_blocks):
                    valid_indices = ~np.isnan(data_array[i, :])
                    n_valid = np.sum(valid_indices)
                    if n_valid >= 2:
                        sim_ranks[i, valid_indices] = np.random.permutation(range(1, n_valid + 1))

                sim_rank_sums = np.nansum(sim_ranks, axis=0)
                sim_treatment_counts = np.sum(~np.isnan(sim_ranks), axis=0)

                # Skip degenerate permutations where a treatment got no rank.
                if np.all(sim_treatment_counts > 0):
                    sim_sum_R_squared_over_r = np.sum(sim_rank_sums**2 / sim_treatment_counts)
                    sim_D = (12 / (n_blocks * n_treatments * (n_treatments + 1))) * sim_sum_R_squared_over_r - 3 * n_blocks * (n_treatments + 1)

                    if sim_D >= observed_D:
                        count_extreme += 1

                total_permutations += 1

            p_value = count_extreme / total_permutations if total_permutations > 0 else 1.0
            return p_value, True

        try:
            p_value, is_exact = calculate_exact_p_value()
        except Exception as e:
            # Last-resort fallback to the chi-square approximation.
            df = n_treatments - 1
            p_value = 1 - stats.chi2.cdf(D, df)
            is_exact = False
            logger.warning(f"精确p值计算失败，使用卡方近似: {str(e)}")

        # Effect-size analogue (eta squared) based on D.
        eta_squared = D / (total_valid_observations - 1) if total_valid_observations > 1 else 0

        result = {
            "test_name": "Durbin 检验（精确）",
            "D_statistic": float(D),
            "degrees_of_freedom": int(n_treatments - 1),
            "p_value": float(p_value),
            "alpha": float(alpha),
            "reject_null": bool(p_value < alpha),
            "eta_squared": float(eta_squared),
            "conclusion": "拒绝原假设：各处理效应存在显著差异" if p_value < alpha else "接受原假设：各处理效应无显著差异",
            "n_blocks": int(n_blocks),
            "n_treatments": int(n_treatments),
            "rank_sums": [float(x) for x in rank_sums.tolist()],
            "treatment_counts": [int(x) for x in treatment_counts.tolist()],
            "total_observations": int(total_valid_observations),
            "is_exact": is_exact
        }

        logger.info(f"Durbin检验完成: D={D:.4f}, p={p_value:.4f}, exact={is_exact}")
        return result
    
    @staticmethod
    def auto_select_test(data_groups: Optional[List[List[float]]] = None, 
                        data_matrix: Optional[List[List[float]]] = None,
                        design_type: str = "independent",
                        data_type: str = "continuous",
                        has_order: bool = False,
                        alpha: float = SIGNIFICANCE_LEVEL) -> Dict[str, Any]:
        """Automatically choose suitable multi-sample location tests.

        Args:
            data_groups: independent sample groups (independent designs)
            data_matrix: paired data matrix (paired designs)
            design_type: one of "independent", "paired", "incomplete_block"
            data_type: one of "continuous", "binary"
            has_order: whether groups/treatments have a natural ordering
            alpha: significance level

        Returns:
            Dict with the recommended test(s) and the rationale for each,
            or ``{"error": ..., "recommendations": []}`` on failure.
        """
        recommendations: List[Dict[str, Any]] = []

        def add(method: str, reason: str, result: Dict[str, Any]) -> None:
            # Collect one recommendation entry in display order.
            recommendations.append({"method": method, "reason": reason, "result": result})

        def handle_independent() -> None:
            if data_groups is None:
                raise InvalidDataError("独立设计需要提供data_groups参数")
            validate_small_sample_data(data_groups)
            if has_order:
                # Ordered groups -> monotone-trend test.
                add("Jonckheere-Terpstra检验",
                    "独立样本设计，组别具有自然顺序，适合检验单调趋势",
                    MultiSampleExactTests.jonckheere_terpstra_exact(data_groups, alpha))
            else:
                # Unordered groups -> omnibus distribution test.
                add("Kruskal-Wallis检验",
                    "独立样本设计，组别无特定顺序，适合检验分布差异",
                    MultiSampleExactTests.kruskal_wallis_exact(data_groups, alpha))

        def handle_paired() -> None:
            if data_matrix is None:
                raise InvalidDataError("配对设计需要提供data_matrix参数")
            validate_paired_data(data_matrix)
            if data_type == "binary":
                # Dichotomous outcomes -> Cochran Q.
                add("Cochran Q检验",
                    "配对设计，二分类数据，适合检验各处理成功率差异",
                    MultiSampleExactTests.cochran_q_test(data_matrix, alpha))
            elif has_order:
                # Ordered treatments -> Page trend test, Friedman as backup.
                add("Page趋势检验",
                    "配对设计，处理具有自然顺序，适合检验单调趋势",
                    MultiSampleExactTests.page_test(data_matrix, alpha))
                add("Friedman检验",
                    "配对设计的通用检验方法，检验各处理效应差异",
                    MultiSampleExactTests.friedman_exact(data_matrix, alpha))
            else:
                # Unordered treatments -> Friedman, plus concordance check.
                add("Friedman检验",
                    "配对设计，处理无特定顺序，适合检验各处理效应差异",
                    MultiSampleExactTests.friedman_exact(data_matrix, alpha))
                add("Kendall协同系数检验",
                    "评估多个评价者（处理）之间的一致性程度",
                    MultiSampleExactTests.kendall_coefficient_concordance(data_matrix, alpha))

        def handle_incomplete_block() -> None:
            if data_matrix is None:
                raise InvalidDataError("不完全区组设计需要提供data_matrix参数")
            add("Durbin检验",
                "不完全区组设计，每个区组不包含所有处理，适合检验处理效应差异",
                MultiSampleExactTests.durbin_test(data_matrix, alpha))

        dispatch = {
            "independent": handle_independent,
            "paired": handle_paired,
            "incomplete_block": handle_incomplete_block,
        }

        try:
            handler = dispatch.get(design_type)
            if handler is None:
                raise InvalidDataError(f"不支持的设计类型: {design_type}")
            handler()
        except Exception as e:
            return {
                "error": str(e),
                "recommendations": []
            }

        return {
            "design_type": design_type,
            "data_type": data_type,
            "has_order": has_order,
            "alpha": alpha,
            "recommendations": recommendations,
            "total_recommendations": len(recommendations)
        }

# 注册工具
@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """Return the schema catalogue of every tool this server exposes."""

    def alpha_prop() -> Dict[str, Any]:
        # Built fresh on every call so no two tool schemas share a mutable dict.
        return {
            "type": "number",
            "default": SIGNIFICANCE_LEVEL,
            "description": "显著性水平"
        }

    def list_of_lists(item_schema: Dict[str, Any], description: str) -> Dict[str, Any]:
        # JSON schema for a 2-D parameter (sample groups or a data matrix).
        return {
            "type": "array",
            "items": {"type": "array", "items": item_schema},
            "description": description
        }

    def obj_schema(properties: Dict[str, Any], required: List[str]) -> Dict[str, Any]:
        return {"type": "object", "properties": properties, "required": required}

    return [
        Tool(
            name="kruskal_wallis_exact",
            description="Kruskal-Wallis秩和检验（精确版本）- 用于检验多个独立样本是否来自相同分布的总体",
            inputSchema=obj_schema(
                {
                    "data_groups": list_of_lists({"type": "number"}, "多组独立样本数据，每组为一个数值列表"),
                    "alpha": alpha_prop()
                },
                ["data_groups"]
            )
        ),
        Tool(
            name="jonckheere_terpstra_exact",
            description="Jonckheere-Terpstra趋势检验（精确版本）- 用于检验多个有序组之间是否存在单调趋势",
            inputSchema=obj_schema(
                {
                    "data_groups": list_of_lists({"type": "number"}, "按顺序排列的多组数据，每组为一个数值列表"),
                    "alpha": alpha_prop()
                },
                ["data_groups"]
            )
        ),
        Tool(
            name="friedman_exact",
            description="Friedman秩和检验（精确版本）- 用于检验配对设计中多个处理的效应是否相同",
            inputSchema=obj_schema(
                {
                    "data_matrix": list_of_lists({"type": "number"}, "配对数据矩阵，每行为一个受试者，每列为一个处理"),
                    "alpha": alpha_prop()
                },
                ["data_matrix"]
            )
        ),
        Tool(
            name="kendall_coefficient_concordance",
            description="Kendall协同系数检验 - 用于检验多个评价者对同一组对象评价的一致性",
            inputSchema=obj_schema(
                {
                    "data_matrix": list_of_lists({"type": "number"}, "评价数据矩阵，每行为一个对象，每列为一个评价者"),
                    "alpha": alpha_prop()
                },
                ["data_matrix"]
            )
        ),
        Tool(
            name="cochran_q_test",
            description="Cochran Q检验 - 用于检验配对设计中多个二分类处理的效应是否相同",
            inputSchema=obj_schema(
                {
                    "data_matrix": list_of_lists(
                        {"type": "integer", "minimum": 0, "maximum": 1},
                        "二分类数据矩阵，每行为一个受试者，每列为一个处理（0或1）"
                    ),
                    "alpha": alpha_prop()
                },
                ["data_matrix"]
            )
        ),
        Tool(
            name="page_test",
            description="Page趋势检验 - 用于检验配对设计中多个有序处理是否存在单调趋势",
            inputSchema=obj_schema(
                {
                    "data_matrix": list_of_lists({"type": "number"}, "配对数据矩阵，每行为一个受试者，每列为一个有序处理"),
                    "alpha": alpha_prop()
                },
                ["data_matrix"]
            )
        ),
        Tool(
            name="durbin_test",
            description="Durbin检验 - 用于不完全区组设计中检验多个处理效应是否相同",
            inputSchema=obj_schema(
                {
                    "data_matrix": list_of_lists({"type": ["number", "null"]}, "数据矩阵，缺失值用null表示"),
                    "alpha": alpha_prop()
                },
                ["data_matrix"]
            )
        ),
        Tool(
            name="auto_select_test",
            description="自动选择合适的多样本位置检验方法",
            inputSchema=obj_schema(
                {
                    "data_groups": list_of_lists({"type": "number"}, "独立样本数据组（用于独立设计）"),
                    "data_matrix": list_of_lists({"type": ["number", "integer"]}, "配对数据矩阵（用于配对设计）"),
                    "design_type": {
                        "type": "string",
                        "enum": ["independent", "paired", "incomplete_block"],
                        "default": "independent",
                        "description": "设计类型"
                    },
                    "data_type": {
                        "type": "string",
                        "enum": ["continuous", "binary"],
                        "default": "continuous",
                        "description": "数据类型"
                    },
                    "has_order": {
                        "type": "boolean",
                        "default": False,
                        "description": "是否具有自然顺序"
                    },
                    "alpha": alpha_prop()
                },
                []
            )
        )
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
    """Dispatch a tool invocation to the matching test implementation.

    On success the test result is serialized as pretty-printed JSON; on any
    failure an error payload (error text, tool name, arguments) is returned
    instead of raising, so the MCP client always gets a text response.
    """
    try:
        # Tools keyed by which positional data argument they consume.
        group_tools = {
            "kruskal_wallis_exact": MultiSampleExactTests.kruskal_wallis_exact,
            "jonckheere_terpstra_exact": MultiSampleExactTests.jonckheere_terpstra_exact,
        }
        matrix_tools = {
            "friedman_exact": MultiSampleExactTests.friedman_exact,
            "kendall_coefficient_concordance": MultiSampleExactTests.kendall_coefficient_concordance,
            "cochran_q_test": MultiSampleExactTests.cochran_q_test,
            "page_test": MultiSampleExactTests.page_test,
            "durbin_test": MultiSampleExactTests.durbin_test,
        }

        alpha = arguments.get("alpha", SIGNIFICANCE_LEVEL)

        if name in group_tools:
            result = group_tools[name](arguments["data_groups"], alpha)
        elif name in matrix_tools:
            result = matrix_tools[name](arguments["data_matrix"], alpha)
        elif name == "auto_select_test":
            result = MultiSampleExactTests.auto_select_test(
                arguments.get("data_groups"),
                arguments.get("data_matrix"),
                arguments.get("design_type", "independent"),
                arguments.get("data_type", "continuous"),
                arguments.get("has_order", False),
                alpha
            )
        else:
            raise ValueError(f"未知的工具名称: {name}")

        return [TextContent(
            type="text",
            text=json.dumps(result, ensure_ascii=False, indent=2)
        )]

    except Exception as e:
        logger.error(f"工具调用失败 {name}: {str(e)}")
        error_result = {
            "error": str(e),
            "tool_name": name,
            "arguments": arguments
        }
        return [TextContent(
            type="text",
            text=json.dumps(error_result, ensure_ascii=False, indent=2)
        )]

async def main():
    """Entry point: run the MCP server over a stdio transport."""
    from mcp.server.stdio import stdio_server

    init_options = InitializationOptions(
        server_name="multi-sample-exact-tests",
        server_version="1.0.0",
        capabilities=server.get_capabilities(
            notification_options=NotificationOptions(),
            experimental_capabilities={}
        )
    )

    # stdio_server yields the (read, write) stream pair for the session.
    async with stdio_server() as (reader, writer):
        await server.run(reader, writer, init_options)

if __name__ == "__main__":
    # Start the async MCP server when this file is executed as a script.
    asyncio.run(main())