#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
数据预处理MCP服务

该服务提供多种数据输入格式的预处理功能，将各种格式的数据转换为
Accurate_sample_approximation_test和Accurate_samples_exact_test服务
可识别的标准数据格式。

支持的输入格式：
- CSV文件
- JSON数据
- Excel文件
- 文本文件
- 直接数值输入
- 矩阵格式数据

输出格式：
- groups: 多组独立样本数据 (用于Kruskal-Wallis, Jonckheere-Terpstra检验)
- data: 重复测量数据矩阵 (用于Friedman, Kendall, Page, Durbin检验)
- binary_data: 二分类数据矩阵 (用于Cochran检验)
"""

import asyncio
import json
import logging
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, Tuple
import traceback

# 第三方库
import numpy as np
import pandas as pd
from mcp.server import Server
from mcp.server.models import InitializationOptions
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent

# Logging configuration.
# BUG FIX: console logs previously went to sys.stdout. This server speaks the
# MCP JSON-RPC protocol over stdio (see main()), so any log line written to
# stdout is interleaved with protocol frames and can break the client.
# Console logging therefore goes to stderr; the file handler is unchanged.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('data_preprocessing.log', encoding='utf-8'),
        logging.StreamHandler(sys.stderr)
    ]
)
logger = logging.getLogger(__name__)

# MCP server instance; tool handlers are registered on it via decorators below.
server = Server("data-preprocessing")

class DataValidationError(Exception):
    """Raised when input data fails validation (e.g. no usable numeric values)."""
    pass

class DataFormatError(Exception):
    """Raised when a file or data structure cannot be parsed or converted."""
    pass

class DataPreprocessor:
    """Core data-preprocessing helpers.

    All members are stateless ``staticmethod``s that parse tabular files,
    clean raw values, and convert ``pandas.DataFrame``s into the list-based
    formats (groups / matrix / binary matrix) consumed by the statistical
    test services.
    """

    @staticmethod
    def validate_numeric_data(data: Any) -> bool:
        """Return True if *data* is a finite number or a (nested) sequence of them.

        NaN and +/-inf are rejected; any non-numeric leaf makes the whole
        structure invalid.
        """
        try:
            if isinstance(data, (int, float)):
                # Reject NaN / infinity explicitly; they break downstream tests.
                return not (np.isnan(data) or np.isinf(data))
            elif isinstance(data, (list, tuple, np.ndarray)):
                return all(DataPreprocessor.validate_numeric_data(item) for item in data)
            else:
                return False
        except (TypeError, ValueError):
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion/ufunc errors
            # (e.g. np.isnan on an unsupported type) mean "not numeric".
            return False

    @staticmethod
    def clean_numeric_data(data: List[Union[int, float, str]]) -> List[float]:
        """Convert raw values to floats, dropping missing and invalid entries.

        Strings are stripped first; 'nan'/'null'/'none'/'' (case-insensitive)
        are treated as missing-value markers. Non-convertible or non-finite
        items are logged and skipped rather than raising.
        """
        cleaned_data: List[float] = []
        for item in data:
            try:
                if isinstance(item, str):
                    item = item.strip()
                    if item.lower() in ['nan', 'null', 'none', '']:
                        continue  # skip missing-value markers silently
                    value = float(item)
                else:
                    value = float(item)

                if not (np.isnan(value) or np.isinf(value)):
                    cleaned_data.append(value)
            except (ValueError, TypeError):
                logger.warning(f"跳过无效数据: {item}")
                continue

        return cleaned_data

    @staticmethod
    def parse_csv_data(file_path: str, **kwargs) -> pd.DataFrame:
        """Read a CSV file into a DataFrame.

        Extra keyword arguments are forwarded to ``pd.read_csv`` on top of
        utf-8 / header-row-0 / comma-separator defaults.

        Raises:
            DataFormatError: if the file cannot be read or parsed.
        """
        try:
            default_kwargs = {
                'encoding': 'utf-8',
                'header': 0,
                'sep': ','
            }
            default_kwargs.update(kwargs)
            # BUG FIX: the tool schema exposes the option as ``delimiter``;
            # pandas raises if both ``sep`` and ``delimiter`` are given, so
            # fold a caller-supplied ``delimiter`` into ``sep``.
            if 'delimiter' in default_kwargs:
                default_kwargs['sep'] = default_kwargs.pop('delimiter')

            df = pd.read_csv(file_path, **default_kwargs)
            logger.info(f"成功读取CSV文件: {file_path}, 形状: {df.shape}")
            return df
        except Exception as e:
            logger.error(f"读取CSV文件失败: {e}")
            raise DataFormatError(f"CSV文件读取失败: {e}") from e

    @staticmethod
    def parse_excel_data(file_path: str, **kwargs) -> pd.DataFrame:
        """Read an Excel sheet into a DataFrame.

        Defaults use the first column as the row index (``index_col=0``,
        ``header=None``) so row names survive the round-trip; callers can
        override via keyword arguments.

        Raises:
            DataFormatError: if the file cannot be read or parsed.
        """
        try:
            default_kwargs = {
                'header': None,
                'index_col': 0,
                'sheet_name': 0
            }
            default_kwargs.update(kwargs)

            df = pd.read_excel(file_path, **default_kwargs)
            logger.info(f"成功读取Excel文件: {file_path}, 形状: {df.shape}")
            return df
        except Exception as e:
            logger.error(f"读取Excel文件失败: {e}")
            raise DataFormatError(f"Excel文件读取失败: {e}") from e

    @staticmethod
    def parse_text_data(file_path: str, delimiter: str = '\t') -> pd.DataFrame:
        """Read a delimited text file (no header row) into a DataFrame.

        Raises:
            DataFormatError: if the file cannot be read or parsed.
        """
        try:
            df = pd.read_csv(file_path, sep=delimiter, header=None, encoding='utf-8')
            logger.info(f"成功读取文本文件: {file_path}, 形状: {df.shape}")
            return df
        except Exception as e:
            logger.error(f"读取文本文件失败: {e}")
            raise DataFormatError(f"文本文件读取失败: {e}") from e

    @staticmethod
    def dataframe_to_groups(df: pd.DataFrame, group_columns: Optional[List[str]] = None) -> List[List[float]]:
        """Convert a DataFrame to the ``groups`` format (independent samples).

        Each selected column becomes one group; values are cleaned via
        :meth:`clean_numeric_data` and empty groups are dropped.

        Args:
            df: source table.
            group_columns: columns to use as groups; defaults to all columns.

        Raises:
            DataFormatError: on conversion failure or when no valid group remains
                (a DataValidationError raised internally is wrapped, preserving
                the original error surface for callers).
        """
        try:
            # Unified loop: an explicit column list and "every column" share
            # the same cleaning/filtering logic (previously duplicated).
            columns = group_columns if group_columns else list(df.columns)
            groups: List[List[float]] = []
            for col in columns:
                if col not in df.columns:
                    logger.warning(f"列 '{col}' 不存在于数据中")
                    continue
                data = DataPreprocessor.clean_numeric_data(df[col].tolist())
                if data:  # keep only non-empty groups
                    groups.append(data)

            if not groups:
                raise DataValidationError("没有找到有效的数值数据组")

            logger.info(f"转换为groups格式: {len(groups)}组数据")
            return groups
        except Exception as e:
            logger.error(f"DataFrame转groups失败: {e}")
            raise DataFormatError(f"数据转换失败: {e}") from e

    @staticmethod
    def dataframe_to_matrix(df: pd.DataFrame, fill_missing: bool = True) -> List[List[float]]:
        """Convert a DataFrame to the ``matrix`` format (repeated measures).

        Args:
            df: source table; non-numeric columns are coerced with
                ``pd.to_numeric(errors='coerce')`` when needed.
            fill_missing: forward- then backward-fill NaN cells; cells that
                remain NaN (e.g. an all-NaN column) are emitted as ``None``.

        Raises:
            DataFormatError: on conversion failure.
        """
        try:
            numeric_df = df.select_dtypes(include=[np.number])
            if numeric_df.empty:
                # No numeric dtype columns -- coerce everything instead.
                numeric_df = df.apply(pd.to_numeric, errors='coerce')

            if fill_missing:
                # BUG FIX: ``fillna(method=...)`` is deprecated (pandas >= 2.1);
                # .ffill()/.bfill() are the supported equivalents.
                matrix = numeric_df.ffill().bfill().values.tolist()
            else:
                matrix = numeric_df.values.tolist()

            # Normalize: floats for real values, None for remaining NaN.
            cleaned_matrix: List[List[float]] = []
            for row in matrix:
                cleaned_row = []
                for value in row:
                    if pd.isna(value):
                        cleaned_row.append(None)
                    else:
                        cleaned_row.append(float(value))
                cleaned_matrix.append(cleaned_row)

            logger.info(f"转换为matrix格式: {len(cleaned_matrix)}x{len(cleaned_matrix[0]) if cleaned_matrix else 0}")
            return cleaned_matrix
        except Exception as e:
            logger.error(f"DataFrame转matrix失败: {e}")
            raise DataFormatError(f"数据转换失败: {e}") from e

    @staticmethod
    def dataframe_to_binary_matrix(df: pd.DataFrame, threshold: float = 0.5) -> List[List[int]]:
        """Convert a DataFrame to a 0/1 matrix by thresholding.

        Cells strictly greater than *threshold* become 1, everything else
        (including NaN, since ``NaN > x`` is False) becomes 0.

        Raises:
            DataFormatError: on conversion failure.
        """
        try:
            numeric_df = df.select_dtypes(include=[np.number])
            if numeric_df.empty:
                numeric_df = df.apply(pd.to_numeric, errors='coerce')

            binary_matrix = (numeric_df > threshold).astype(int).values.tolist()

            logger.info(f"转换为binary matrix格式: {len(binary_matrix)}x{len(binary_matrix[0]) if binary_matrix else 0}")
            return binary_matrix
        except Exception as e:
            logger.error(f"DataFrame转binary matrix失败: {e}")
            raise DataFormatError(f"数据转换失败: {e}") from e

    @staticmethod
    def auto_detect_format(data: Any) -> str:
        """Classify raw input.

        Returns one of: 'csv', 'excel', 'text' (file-path strings by
        extension), 'json_string' (other strings), 'json_dict', 'matrix'
        (list of lists), 'list', or 'unknown'.
        """
        if isinstance(data, str):
            if data.endswith(('.csv', '.CSV')):
                return 'csv'
            elif data.endswith(('.xlsx', '.xls', '.XLSX', '.XLS')):
                return 'excel'
            elif data.endswith(('.txt', '.TXT')):
                return 'text'
            else:
                return 'json_string'
        elif isinstance(data, dict):
            return 'json_dict'
        elif isinstance(data, list):
            # Only a homogeneous list-of-lists counts as a matrix.
            if all(isinstance(item, list) for item in data):
                return 'matrix'
            else:
                return 'list'
        else:
            return 'unknown'

# MCP工具函数
@server.list_tools()
async def handle_list_tools() -> List[Tool]:
    """Return the list of tools this server exposes.

    Five tools are advertised; each JSON schema below is the contract the
    corresponding handler in this file implements.  The descriptions are
    user-facing protocol strings and are intentionally left in Chinese.
    """
    return [
        # Tool 1: file -> groups/matrix/binary_matrix (handled by process_file_data)
        Tool(
            name="process_file_data",
            description="从文件（CSV、Excel、文本）中读取数据并转换为指定格式",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {
                        "type": "string",
                        "description": "文件路径（支持CSV、Excel、文本文件）"
                    },
                    "output_format": {
                        "type": "string",
                        "enum": ["groups", "matrix", "binary_matrix"],
                        "description": "输出格式：groups(多组独立样本)、matrix(重复测量矩阵)、binary_matrix(二分类矩阵)"
                    },
                    "file_options": {
                        "type": "object",
                        "description": "文件读取选项（如分隔符、编码等）",
                        "properties": {
                            "delimiter": {"type": "string", "default": ","},
                            "encoding": {"type": "string", "default": "utf-8"},
                            "header": {"type": "integer", "default": 0},
                            "sheet_name": {"type": ["string", "integer"], "default": 0}
                        }
                    },
                    "group_columns": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "指定用作分组的列名（仅用于groups格式）"
                    },
                    "binary_threshold": {
                        "type": "number",
                        "default": 0.5,
                        "description": "二分类阈值（仅用于binary_matrix格式）"
                    }
                },
                "required": ["file_path", "output_format"]
            }
        ),
        # Tool 2: in-memory data -> standard formats (handled by process_direct_data)
        Tool(
            name="process_direct_data",
            description="直接处理输入的数值数据并转换为指定格式",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["array", "object"],
                        "description": "输入数据（可以是列表、矩阵或JSON对象）"
                    },
                    "output_format": {
                        "type": "string",
                        "enum": ["groups", "matrix", "binary_matrix"],
                        "description": "输出格式：groups(多组独立样本)、matrix(重复测量矩阵)、binary_matrix(二分类矩阵)"
                    },
                    "data_structure": {
                        "type": "string",
                        "enum": ["auto", "groups", "matrix", "columns"],
                        "default": "auto",
                        "description": "输入数据结构：auto(自动检测)、groups(已分组)、matrix(矩阵)、columns(列数据)"
                    },
                    "binary_threshold": {
                        "type": "number",
                        "default": 0.5,
                        "description": "二分类阈值（仅用于binary_matrix格式）"
                    }
                },
                "required": ["data", "output_format"]
            }
        ),
        # Tool 3: format validation against a target test (handled by validate_data_format)
        Tool(
            name="validate_data_format",
            description="验证数据是否符合指定MCP服务的输入要求",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["array", "object"],
                        "description": "要验证的数据"
                    },
                    "target_service": {
                        "type": "string",
                        "enum": ["exact_test", "approximation_test"],
                        "description": "目标MCP服务：exact_test(精确检验)、approximation_test(近似检验)"
                    },
                    "test_type": {
                        "type": "string",
                        "enum": [
                            "kruskal_wallis", "jonckheere_terpstra", "friedman", 
                            "kendall_concordance", "cochran_q", "page_trend", "durbin"
                        ],
                        "description": "检验类型"
                    }
                },
                "required": ["data", "target_service", "test_type"]
            }
        ),
        # Tool 4: ranking matrices (evaluators x objects) -> matrix/groups
        Tool(
            name="convert_ranking_data",
            description="转换排序数据（如用户提供的电视排名数据）为标准格式",
            inputSchema={
                "type": "object",
                "properties": {
                    "ranking_data": {
                        "type": "array",
                        "description": "排序数据矩阵，每行代表一个评价者，每列代表一个被评价对象",
                        "items": {
                            "type": "array",
                            "items": {"type": "number"}
                        }
                    },
                    "output_format": {
                        "type": "string",
                        "enum": ["matrix", "groups"],
                        "default": "matrix",
                        "description": "输出格式"
                    },
                    "evaluator_names": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "评价者名称（可选）"
                    },
                    "object_names": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "被评价对象名称（可选）"
                    }
                },
                "required": ["ranking_data"]
            }
        ),
        # Tool 5: inspection / statistics summary (handled by get_data_info)
        Tool(
            name="get_data_info",
            description="获取数据的基本信息和统计摘要，支持直接数据输入或文件路径输入",
            inputSchema={
                "type": "object",
                "properties": {
                    "data": {
                        "type": ["array", "object", "string"],
                        "description": "要分析的数据或文件路径（支持.xlsx、.csv、.txt文件）"
                    }
                },
                "required": ["data"]
            }
        )
    ]

@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> list[TextContent]:
    """Dispatch an MCP tool invocation to the matching handler coroutine.

    Unknown tool names and handler exceptions are reported as text replies
    rather than propagated to the protocol layer.
    """
    # Table-driven dispatch: tool name -> handler coroutine.
    handlers = {
        "process_file_data": process_file_data,
        "process_direct_data": process_direct_data,
        "validate_data_format": validate_data_format,
        "convert_ranking_data": convert_ranking_data,
        "get_data_info": get_data_info,
    }
    try:
        handler = handlers.get(name)
        if handler is None:
            return [TextContent(type="text", text=f"未知工具: {name}")]
        return await handler(arguments)
    except Exception as e:
        logger.error(f"工具调用失败: {e}\n{traceback.format_exc()}")
        return [TextContent(type="text", text=f"工具调用失败: {str(e)}")]

async def process_file_data(args: Dict[str, Any]) -> list[TextContent]:
    """Load a CSV/Excel/text file and convert it to the requested format.

    Expects ``file_path`` and ``output_format`` in *args*; optional
    ``file_options``, ``group_columns`` and ``binary_threshold`` refine the
    read and conversion.  Replies with a JSON payload containing the
    converted data plus shape and sample-size metadata.
    """
    try:
        file_path = args["file_path"]
        output_format = args["output_format"]
        file_options = args.get("file_options", {})
        group_columns = args.get("group_columns")
        binary_threshold = args.get("binary_threshold", 0.5)

        # Guard: refuse early when the path does not exist.
        if not os.path.exists(file_path):
            return [TextContent(type="text", text=f"文件不存在: {file_path}")]

        # Choose the reader from the (case-insensitive) file extension.
        suffix = Path(file_path).suffix.lower()
        if suffix == '.csv':
            df = DataPreprocessor.parse_csv_data(file_path, **file_options)
        elif suffix in ('.xlsx', '.xls'):
            df = DataPreprocessor.parse_excel_data(file_path, **file_options)
        elif suffix == '.txt':
            df = DataPreprocessor.parse_text_data(file_path, file_options.get('delimiter', '\t'))
        else:
            return [TextContent(type="text", text=f"不支持的文件格式: {suffix}")]

        # Convert the DataFrame into the requested layout.
        if output_format == "groups":
            result_data = DataPreprocessor.dataframe_to_groups(df, group_columns)
        elif output_format == "matrix":
            result_data = DataPreprocessor.dataframe_to_matrix(df)
        elif output_format == "binary_matrix":
            result_data = DataPreprocessor.dataframe_to_binary_matrix(df, binary_threshold)
        else:
            return [TextContent(type="text", text=f"不支持的输出格式: {output_format}")]

        # Total sample size: sum of group sizes, or rows x columns for matrices.
        total_sample_size = 0
        if result_data:
            if output_format == "groups":
                total_sample_size = sum(len(group) for group in result_data)
            elif result_data[0]:
                total_sample_size = len(result_data) * len(result_data[0])

        result = {
            "success": True,
            "output_format": output_format,
            "data": result_data,
            "data_info": {
                "original_shape": df.shape,
                "output_shape": f"{len(result_data)}x{len(result_data[0]) if result_data else 0}",
                "file_path": file_path,
                "sample_size_assessment": _calculate_sample_size_assessment(total_sample_size)
            }
        }

        return [TextContent(type="text", text=json.dumps(result, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"文件数据处理失败: {e}")
        return [TextContent(type="text", text=f"文件数据处理失败: {str(e)}")]

async def process_direct_data(args: Dict[str, Any]) -> list[TextContent]:
    """Convert directly supplied data into the requested standard format.

    ``data`` may be a dict, a flat list, or a list of lists; ``data_structure``
    "auto" triggers detection via DataPreprocessor.auto_detect_format.  Data
    already shaped as groups/matrix is passed through (groups are cleaned,
    matrices are taken as-is); anything else goes through a DataFrame.
    """
    try:
        data = args["data"]
        output_format = args["output_format"]
        data_structure = args.get("data_structure", "auto")
        binary_threshold = args.get("binary_threshold", 0.5)

        if data_structure == "auto":
            data_structure = DataPreprocessor.auto_detect_format(data)

        # Computed once: is the input a homogeneous list of lists?
        is_nested_list = isinstance(data, list) and all(isinstance(item, list) for item in data)

        # Build a DataFrame view of the input for the generic converters.
        if isinstance(data, dict):
            frame = pd.DataFrame(data)
        elif isinstance(data, list):
            frame = pd.DataFrame(data) if is_nested_list else pd.DataFrame({"data": data})
        else:
            return [TextContent(type="text", text="不支持的数据类型")]

        if output_format == "groups":
            if data_structure == "groups" and is_nested_list:
                # Already grouped: just clean each group.
                result_data = [DataPreprocessor.clean_numeric_data(group) for group in data]
            else:
                result_data = DataPreprocessor.dataframe_to_groups(frame)
        elif output_format == "matrix":
            # Pre-shaped matrices pass through untouched.
            if data_structure == "matrix" and is_nested_list:
                result_data = data
            else:
                result_data = DataPreprocessor.dataframe_to_matrix(frame)
        elif output_format == "binary_matrix":
            result_data = DataPreprocessor.dataframe_to_binary_matrix(frame, binary_threshold)
        else:
            return [TextContent(type="text", text=f"不支持的输出格式: {output_format}")]

        # Total sample size mirrors process_file_data's accounting.
        total_sample_size = 0
        if result_data:
            if output_format == "groups":
                total_sample_size = sum(len(group) for group in result_data)
            elif result_data[0]:
                total_sample_size = len(result_data) * len(result_data[0])

        result = {
            "success": True,
            "output_format": output_format,
            "data": result_data,
            "data_info": {
                "input_structure": data_structure,
                "output_shape": f"{len(result_data)}x{len(result_data[0]) if result_data else 0}",
                "sample_size_assessment": _calculate_sample_size_assessment(total_sample_size)
            }
        }

        return [TextContent(type="text", text=json.dumps(result, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"直接数据处理失败: {e}")
        return [TextContent(type="text", text=f"直接数据处理失败: {str(e)}")]

async def validate_data_format(args: Dict[str, Any]) -> list[TextContent]:
    """Validate that *data* matches the input contract of a target test.

    Checks structure (groups / matrix / binary matrix depending on
    ``test_type``), collects errors and warnings, and attaches a sample-size
    assessment plus a service recommendation.  Always replies with the
    validation report as JSON; unexpected failures fall through to a plain
    error message.
    """
    try:
        data = args["data"]
        target_service = args["target_service"]
        test_type = args["test_type"]

        validation_result = {
            "valid": True,
            "errors": [],
            "warnings": [],
            "recommendations": [],
            "sample_size_info": {}
        }

        # Total sample size: group sizes summed for groups-style tests,
        # rows x columns for matrix-style tests.
        total_sample_size = 0
        if isinstance(data, list):
            if all(isinstance(item, list) for item in data):
                if test_type in ["kruskal_wallis", "jonckheere_terpstra"]:
                    total_sample_size = sum(len(group) for group in data)
                else:
                    total_sample_size = len(data) * len(data[0]) if data and data[0] else 0
            else:
                total_sample_size = len(data)

        validation_result["sample_size_info"] = _calculate_sample_size_assessment(total_sample_size)

        # Structure checks per test type.
        if test_type in ["kruskal_wallis", "jonckheere_terpstra"]:
            # groups format required: [[group1], [group2], ...]
            if not isinstance(data, list) or not all(isinstance(group, list) for group in data):
                validation_result["valid"] = False
                validation_result["errors"].append("数据应为groups格式：[[group1], [group2], ...]")
            else:
                for i, group in enumerate(data):
                    if len(group) < 3:
                        validation_result["warnings"].append(f"第{i+1}组数据量较少({len(group)})，建议至少3个数据点")
                    if not all(isinstance(x, (int, float)) for x in group):
                        validation_result["errors"].append(f"第{i+1}组包含非数值数据")
                        validation_result["valid"] = False

        elif test_type in ["friedman", "kendall_concordance", "page_trend", "durbin"]:
            # matrix format required: [[row1], [row2], ...]
            if not isinstance(data, list) or not all(isinstance(row, list) for row in data):
                validation_result["valid"] = False
                validation_result["errors"].append("数据应为matrix格式：[[row1], [row2], ...]")
            elif not data:
                # BUG FIX: an empty list passed the vacuous all() check above
                # and then crashed with IndexError on data[0], turning the
                # validation report into a generic failure message.
                validation_result["valid"] = False
                validation_result["errors"].append("矩阵数据为空")
            else:
                if len(data) < 3:
                    validation_result["warnings"].append(f"行数较少({len(data)})，建议至少3行")
                if len(data[0]) < 3:
                    validation_result["warnings"].append(f"列数较少({len(data[0])})，建议至少3列")

                # All rows must have the same length.
                row_lengths = [len(row) for row in data]
                if len(set(row_lengths)) > 1:
                    validation_result["errors"].append("矩阵行长度不一致")
                    validation_result["valid"] = False

        elif test_type == "cochran_q":
            # binary matrix required: cells must be exactly 0 or 1.
            if not isinstance(data, list) or not all(isinstance(row, list) for row in data):
                validation_result["valid"] = False
                validation_result["errors"].append("数据应为binary matrix格式：[[0,1,0], [1,0,1], ...]")
            else:
                for i, row in enumerate(data):
                    for j, value in enumerate(row):
                        if value not in [0, 1]:
                            validation_result["errors"].append(f"位置({i},{j})的值{value}不是0或1")
                            validation_result["valid"] = False

        # Service-level guidance.
        if target_service == "exact_test":
            validation_result["recommendations"].append("精确检验适用于小样本，大样本建议使用近似检验")
        else:
            validation_result["recommendations"].append("近似检验适用于大样本，小样本建议使用精确检验")

        return [TextContent(type="text", text=json.dumps(validation_result, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"数据验证失败: {e}")
        return [TextContent(type="text", text=f"数据验证失败: {str(e)}")]

async def convert_ranking_data(args: Dict[str, Any]) -> list[TextContent]:
    """Convert a ranking matrix (rows = evaluators, columns = objects).

    Cleans each row, optionally transposes to ``groups`` (one group per
    object), and replies with the converted data, naming metadata, a
    sample-size assessment, and recommended tests.
    """
    try:
        ranking_data = args["ranking_data"]
        output_format = args.get("output_format", "matrix")
        evaluator_names = args.get("evaluator_names", [])
        object_names = args.get("object_names", [])

        # Must be a two-dimensional list.
        if not isinstance(ranking_data, list) or not all(isinstance(row, list) for row in ranking_data):
            return [TextContent(type="text", text="排序数据应为二维列表格式")]

        # Clean every row; rows with no usable numeric values are dropped.
        cleaned_rows = (DataPreprocessor.clean_numeric_data(row) for row in ranking_data)
        cleaned_data = [row for row in cleaned_rows if row]

        if not cleaned_data:
            return [TextContent(type="text", text="没有有效的排序数据")]

        if output_format == "matrix":
            result_data = cleaned_data
        elif output_format == "groups":
            # Transpose: each column (object) becomes one group.
            result_data = [list(column) for column in zip(*cleaned_data)]
        else:
            return [TextContent(type="text", text=f"不支持的输出格式: {output_format}")]

        n_evaluators = len(cleaned_data)
        n_objects = len(cleaned_data[0]) if cleaned_data else 0
        total_sample_size = n_evaluators * n_objects if cleaned_data and cleaned_data[0] else 0

        result = {
            "success": True,
            "output_format": output_format,
            "data": result_data,
            "data_info": {
                "n_evaluators": n_evaluators,
                "n_objects": n_objects,
                "evaluator_names": evaluator_names if evaluator_names else [f"评价者{i+1}" for i in range(n_evaluators)],
                "object_names": object_names if object_names else [f"对象{i+1}" for i in range(n_objects)],
                "sample_size_assessment": _calculate_sample_size_assessment(total_sample_size)
            },
            "recommended_tests": [
                "kendall_concordance_exact",  # evaluator agreement
                "friedman_exact"  # differences between objects
            ]
        }

        return [TextContent(type="text", text=json.dumps(result, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"排序数据转换失败: {e}")
        return [TextContent(type="text", text=f"排序数据转换失败: {str(e)}")]

def _calculate_sample_size_assessment(total_sample_size: int) -> Dict[str, Any]:
    """计算样本量评估"""
    assessment = {
        "total_sample_size": total_sample_size,
        "size_category": "",
        "recommended_service": "",
        "statistical_power": "",
        "suggestions": []
    }
    
    if total_sample_size <= 30:
        assessment["size_category"] = "小样本"
        assessment["recommended_service"] = "exact_test"
        assessment["statistical_power"] = "中等"
        assessment["suggestions"] = [
            "样本量较小，推荐使用精确检验方法",
            "精确检验能提供准确的p值，不依赖大样本假设"
        ]
    else:
        assessment["size_category"] = "大样本"
        assessment["recommended_service"] = "approximation_test"
        assessment["statistical_power"] = "高"
        assessment["suggestions"] = [
            "样本量充足，推荐使用近似检验方法",
            "近似检验计算更快，基于正态分布近似"
        ]
    
    return assessment

async def get_data_info(args: Dict[str, Any]) -> list[TextContent]:
    """Report structure, preview and statistics for data or a data file.

    ``args["data"]`` is either a direct value (list / matrix / dict) or a
    path string ending in ``.xlsx`` / ``.csv`` / ``.txt``.  Every reply
    includes a sample-size assessment so the caller can choose between the
    exact-test and approximation-test services.
    """
    try:
        data = args["data"]

        # ---- file-path input ----------------------------------------------
        if isinstance(data, str) and data.endswith(('.xlsx', '.csv', '.txt')):
            # FIX: removed redundant function-local ``import pandas as pd`` /
            # ``import os`` that shadowed the module-level imports.
            if not os.path.exists(data):
                return [TextContent(type="text", text=f"文件不存在: {data}")]

            info = {
                "input_type": "file_path",
                "file_path": data,
                "file_exists": True
            }

            try:
                total_sample_size = 0

                if data.endswith('.xlsx'):
                    # Excel: first column becomes the row index (keeps row names).
                    df = pd.read_excel(data, header=None, index_col=0)
                    info["file_type"] = "Excel"
                    info["column_names"] = df.columns.tolist()
                    info["row_names"] = df.index.tolist() if hasattr(df.index, 'tolist') else None
                    info["shape"] = df.shape
                    info["data_preview"] = df.head(3).to_dict('index')  # preview keeps row names
                    total_sample_size = df.shape[0] * df.shape[1]  # total cell count

                elif data.endswith('.csv'):
                    df = pd.read_csv(data, header=0)
                    info["file_type"] = "CSV"
                    info["column_names"] = df.columns.tolist()
                    # Only report row names when the index is not the default range.
                    info["row_names"] = df.index.tolist() if not df.index.equals(pd.RangeIndex(len(df))) else None
                    info["shape"] = df.shape
                    info["data_preview"] = df.head(3).to_dict('records')  # first 3 rows
                    total_sample_size = df.shape[0] * df.shape[1]  # total cell count

                else:
                    info["file_type"] = "Text"
                    # FIX: single pass over the file -- collect a 5-line preview
                    # while counting lines (the old code opened/read it twice).
                    preview_lines = []
                    line_count = 0
                    with open(data, 'r', encoding='utf-8') as f:
                        for line in f:
                            if line_count < 5:
                                preview_lines.append(line.strip())
                            line_count += 1
                    info["preview_lines"] = preview_lines
                    # For text files the row count stands in for the sample size.
                    total_sample_size = line_count

                info["sample_size_assessment"] = _calculate_sample_size_assessment(total_sample_size)

                # Point the caller at the conversion tool for the next step.
                info["usage_suggestion"] = {
                    "recommended_tool": "process_file_data",
                    "example_usage": {
                        "file_path": data,
                        "output_format": "matrix",
                        "file_options": {"header": 0}
                    }
                }

            except Exception as file_error:
                # Best-effort: report the read failure instead of failing the call.
                info["file_read_error"] = str(file_error)
                info["suggestion"] = "请检查文件格式和编码"

            return [TextContent(type="text", text=json.dumps(info, ensure_ascii=False, indent=2))]

        # ---- direct data input --------------------------------------------
        info = {
            "input_type": "data_array",
            "data_type": type(data).__name__,
            "structure": DataPreprocessor.auto_detect_format(data)
        }

        total_sample_size = 0

        if isinstance(data, list):
            info["length"] = len(data)
            if data and isinstance(data[0], list):
                info["shape"] = f"{len(data)}x{len(data[0])}"
                info["is_matrix"] = True
                # Flatten and keep only numeric cells for the statistics.
                flat_data = [item for sublist in data for item in sublist if isinstance(item, (int, float))]
                if flat_data:
                    info["statistics"] = {
                        "count": len(flat_data),
                        "min": min(flat_data),
                        "max": max(flat_data),
                        "mean": sum(flat_data) / len(flat_data)
                    }
                    total_sample_size = len(flat_data)
                else:
                    total_sample_size = len(data) * len(data[0]) if data else 0
            else:
                info["is_matrix"] = False
                numeric_data = [x for x in data if isinstance(x, (int, float))]
                if numeric_data:
                    info["statistics"] = {
                        "count": len(numeric_data),
                        "min": min(numeric_data),
                        "max": max(numeric_data),
                        "mean": sum(numeric_data) / len(numeric_data)
                    }
                    total_sample_size = len(numeric_data)
                else:
                    total_sample_size = len(data)

        elif isinstance(data, dict):
            info["keys"] = list(data.keys())
            info["n_keys"] = len(data.keys())
            # Every value counts; list values contribute their length.
            total_sample_size = sum(len(value) if isinstance(value, list) else 1
                                    for value in data.values())

        info["sample_size_assessment"] = _calculate_sample_size_assessment(total_sample_size)

        # Suggest output formats compatible with the detected structure.
        recommendations = []
        if info.get("is_matrix"):
            recommendations.extend(["matrix", "binary_matrix"])
        if isinstance(data, list) and all(isinstance(item, list) for item in data):
            recommendations.append("groups")

        info["recommended_formats"] = recommendations

        return [TextContent(type="text", text=json.dumps(info, ensure_ascii=False, indent=2))]

    except Exception as e:
        logger.error(f"获取数据信息失败: {e}")
        return [TextContent(type="text", text=f"获取数据信息失败: {str(e)}")]

async def main():
    """Run the MCP server over stdio until the client disconnects.

    The stdio transport means stdout carries protocol frames; logging is
    configured separately at module level.
    """
    async with stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="data-preprocessing",
                server_version="1.0.0",
                capabilities={}
            )
        )

# Script entry point: start the asyncio event loop and serve over stdio.
if __name__ == "__main__":
    asyncio.run(main())