#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCP 集成数据处理服务

整合了以下6个MCP服务：
- data-analyzer: 数据分析服务
- data-cleaner: 数据清洗服务
- data-exporter: 数据导出服务
- data-normalizer: 数据标准化服务
- data-validator: 数据验证服务
- data-visualizer: 数据可视化服务
"""

from mcp.server.fastmcp import FastMCP
from typing import List, Dict, Optional, Union, Any, Tuple
import pandas as pd
import numpy as np
from pathlib import Path
import json
import tempfile
import os
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')

# 导入各个服务的核心功能
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import (
    StandardScaler, MinMaxScaler, MaxAbsScaler, 
    RobustScaler, Normalizer, PowerTransformer
)
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import base64
from io import BytesIO
import sqlite3
from openpyxl import Workbook
from openpyxl.styles import Font, PatternFill, Alignment
from openpyxl.utils.dataframe import dataframe_to_rows
import pickle
import re
import zipfile

# Optional file-format support libraries.
# Each name is bound to None when its library is unavailable, and the
# reader/writer functions check for None before using it.
try:
    import pyreadstat  # SPSS (.sav / .zsav) support
except ImportError:
    print("请安装pyreadstat以支持SPSS文件: pip install pyreadstat")
    pyreadstat = None

try:
    import scipy.io  # MATLAB (.mat) support
except ImportError:
    print("请安装scipy以支持MATLAB文件: pip install scipy")
    scipy = None

try:
    import h5py  # HDF5 (.h5 / .hdf5) and MATLAB v7.3 support
except ImportError:
    print("请安装h5py以支持HDF5文件: pip install h5py")
    h5py = None

try:
    # BUG FIX: the standalone `feather` package is deprecated; pandas' Feather
    # I/O is provided by pyarrow, which is what the install hint below says.
    import pyarrow.feather as feather  # Feather (.feather) support
except ImportError:
    print("请安装pyarrow以支持Feather文件: pip install pyarrow")
    feather = None

# Configure matplotlib fonts so Chinese (CJK) labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign readable with CJK fonts

# Create the MCP service instance that every @mcp.tool() function registers on.
mcp = FastMCP("integrated-data-processing-server")

# ==================== 通用文件处理函数 ====================

def read_data_file(file_path: str, **kwargs) -> pd.DataFrame:
    """Read a data file into a pandas DataFrame, dispatching on file extension.

    Supported formats:
    - CSV (.csv), Excel (.xlsx, .xls), JSON (.json)
    - TXT (.txt) — delimited text with separator auto-detection
    - SPSS (.sav, .zsav) via pyreadstat
    - MATLAB (.mat) via scipy.io, with an h5py fallback for v7.3 files
    - Parquet (.parquet), Feather (.feather), HDF5 (.h5, .hdf5)
    - Pickle (.pkl, .pickle)

    Args:
        file_path: Path of the file to read.
        **kwargs: Format-specific options: encoding, sep, sheet_name,
            orient, key.

    Returns:
        pandas DataFrame with the file contents.

    Raises:
        Exception: any underlying read error, wrapped with the file path;
            the original error is chained as __cause__.
    """
    file_ext = Path(file_path).suffix.lower()

    try:
        if file_ext == '.csv':
            encoding = kwargs.get('encoding', 'utf-8')
            separator = kwargs.get('sep', ',')
            return pd.read_csv(file_path, encoding=encoding, sep=separator)

        elif file_ext in ['.xlsx', '.xls']:
            sheet_name = kwargs.get('sheet_name', 0)
            return pd.read_excel(file_path, sheet_name=sheet_name)

        elif file_ext == '.json':
            orient = kwargs.get('orient', 'records')
            return pd.read_json(file_path, orient=orient)

        elif file_ext == '.txt':
            # Delimited text: when no separator is supplied, probe the first
            # line for the most likely one (tab > comma > semicolon > pipe),
            # falling back to a space.
            separator = kwargs.get('sep', None)
            encoding = kwargs.get('encoding', 'utf-8')

            if separator is None:
                with open(file_path, 'r', encoding=encoding) as f:
                    first_line = f.readline()
                for candidate in ('\t', ',', ';', '|'):
                    if candidate in first_line:
                        separator = candidate
                        break
                else:
                    separator = ' '

            return pd.read_csv(file_path, sep=separator, encoding=encoding)

        elif file_ext in ['.sav', '.zsav']:
            if pyreadstat is None:
                raise ImportError("需要安装pyreadstat库来读取SPSS文件")
            df, _meta = pyreadstat.read_sav(file_path)  # metadata is discarded
            return df

        elif file_ext == '.mat':
            if scipy is None:
                raise ImportError("需要安装scipy库来读取MATLAB文件")

            try:
                mat_data = scipy.io.loadmat(file_path)
                # Keep only real data variables (skip __header__ etc.).
                data_vars = {k: v for k, v in mat_data.items()
                            if not k.startswith('__') and isinstance(v, np.ndarray)}

                if len(data_vars) == 1:
                    var_name, var_data = next(iter(data_vars.items()))
                    if var_data.ndim == 2:
                        return pd.DataFrame(var_data)
                    elif var_data.ndim > 2:
                        # Collapse trailing dimensions of a >2-D array into columns.
                        rows = var_data.shape[0] * var_data.shape[1]
                        cols = np.prod(var_data.shape[2:])
                        return pd.DataFrame(var_data.reshape(rows, cols))
                    else:
                        return pd.DataFrame({var_name: var_data.flatten()})
                else:
                    # Multiple variables: merge into one DataFrame. 2-D arrays
                    # become one column per source column; >2-D arrays are
                    # flattened next to a textual record of their shape so the
                    # writer can round-trip them (see save_data_file).
                    df_dict = {}
                    for var_name, var_data in data_vars.items():
                        if var_data.ndim == 1:
                            df_dict[var_name] = var_data
                        elif var_data.ndim == 2:
                            if var_data.shape[1] == 1:
                                df_dict[var_name] = var_data.flatten()
                            else:
                                for i in range(var_data.shape[1]):
                                    df_dict[f"{var_name}_col{i+1}"] = var_data[:, i]
                        else:
                            df_dict[f"{var_name}_shape"] = str(var_data.shape)
                            df_dict[f"{var_name}_flattened"] = var_data.flatten()
                    return pd.DataFrame(df_dict)

            except Exception as e:
                # scipy.io cannot read MATLAB v7.3 files (HDF5-based);
                # retry with h5py if it is available.
                if h5py is not None:
                    try:
                        with h5py.File(file_path, 'r') as f:
                            data_dict = {}

                            def extract_datasets(name, obj):
                                # Collect every ndarray-valued dataset in the file.
                                if isinstance(obj, h5py.Dataset):
                                    data = obj[()]
                                    if isinstance(data, np.ndarray):
                                        if data.ndim == 2:
                                            data_dict[name] = data
                                        else:
                                            data_dict[name] = data.flatten()

                            f.visititems(extract_datasets)

                            if data_dict:
                                return pd.DataFrame(data_dict)
                            raise ValueError("未找到可用的数据集")
                    except Exception:
                        # BUG FIX: was a bare `except:` which also swallowed
                        # KeyboardInterrupt/SystemExit; chain the scipy error.
                        raise Exception(f"无法读取MAT文件: {str(e)}") from e
                else:
                    raise Exception(f"无法读取MAT文件: {str(e)}，如果是v7.3格式，请安装h5py") from e

        elif file_ext == '.parquet':
            return pd.read_parquet(file_path)

        elif file_ext == '.feather':
            return pd.read_feather(file_path)

        elif file_ext in ['.h5', '.hdf5']:
            key = kwargs.get('key', None)
            if key:
                return pd.read_hdf(file_path, key=key)
            else:
                # No key given: read the first key found in the store.
                with pd.HDFStore(file_path, 'r') as store:
                    keys = store.keys()
                    if keys:
                        return pd.read_hdf(file_path, key=keys[0])
                    else:
                        raise ValueError("HDF5文件中没有找到数据")

        elif file_ext in ['.pkl', '.pickle']:
            # NOTE(security): pickle.load executes arbitrary code from the
            # file — only read pickle files from trusted sources.
            with open(file_path, 'rb') as f:
                data = pickle.load(f)
                if isinstance(data, pd.DataFrame):
                    return data
                elif isinstance(data, (dict, list, tuple)):
                    return pd.DataFrame(data)
                else:
                    raise ValueError("Pickle文件中的数据格式不支持")

        else:
            raise ValueError(f"不支持的文件格式: {file_ext}")

    except Exception as e:
        # Wrap with the file path; chain the original error for debugging.
        raise Exception(f"读取文件失败 ({file_path}): {str(e)}") from e

def save_data_file(df: pd.DataFrame, file_path: str, **kwargs) -> bool:
    """Save a DataFrame to disk, dispatching on the output file extension.

    Supported formats:
    - CSV (.csv), Excel (.xlsx), JSON (.json), delimited TXT (.txt)
    - SPSS (.sav) via pyreadstat, MATLAB (.mat) via scipy.io
    - Parquet (.parquet), Feather (.feather), HDF5 (.h5, .hdf5)
    - Pickle (.pkl, .pickle)

    Args:
        df: DataFrame to save.
        file_path: Output file path; parent directories are created.
        **kwargs: Format-specific options: encoding, sep, index,
            sheet_name, orient, indent, key.

    Returns:
        True on success, False on failure (the error is printed, not raised).
    """
    import ast  # stdlib; used to parse recorded array-shape strings safely

    file_ext = Path(file_path).suffix.lower()

    try:
        # Make sure the output directory exists.
        output_dir = os.path.dirname(file_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        if file_ext == '.csv':
            encoding = kwargs.get('encoding', 'utf-8')
            separator = kwargs.get('sep', ',')
            index = kwargs.get('index', False)
            df.to_csv(file_path, encoding=encoding, sep=separator, index=index)

        elif file_ext == '.xlsx':
            sheet_name = kwargs.get('sheet_name', 'Sheet1')
            index = kwargs.get('index', False)
            df.to_excel(file_path, sheet_name=sheet_name, index=index)

        elif file_ext == '.json':
            orient = kwargs.get('orient', 'records')
            indent = kwargs.get('indent', 2)
            df.to_json(file_path, orient=orient, indent=indent, force_ascii=False)

        elif file_ext == '.txt':
            separator = kwargs.get('sep', '\t')
            encoding = kwargs.get('encoding', 'utf-8')
            index = kwargs.get('index', False)
            df.to_csv(file_path, sep=separator, encoding=encoding, index=index)

        elif file_ext == '.sav':
            if pyreadstat is None:
                raise ImportError("需要安装pyreadstat库来保存SPSS文件")
            pyreadstat.write_sav(df, file_path)

        elif file_ext == '.mat':
            if scipy is None:
                raise ImportError("需要安装scipy库来保存MATLAB文件")

            # Convert the DataFrame back into MATLAB variables, undoing the
            # column-expansion conventions used by read_data_file.
            mat_dict = {}

            for col in df.columns:
                # Sanitize the column name per MATLAB variable-name rules.
                clean_col = re.sub(r'[^a-zA-Z0-9_]', '_', str(col))
                # BUG FIX: guard against an empty column name before
                # indexing the first character.
                if clean_col and clean_col[0].isdigit():
                    clean_col = 'var_' + clean_col

                # Shape-metadata columns are consumed by the matching
                # *_flattened column below — skip them here.
                if re.match(r'(.+)_shape$', col):
                    continue

                # Flattened high-dimensional array: restore its original
                # shape from the companion *_shape column.
                flat_match = re.match(r'(.+)_flattened$', col)
                if flat_match:
                    base_var = flat_match.group(1)
                    shape_col = f"{base_var}_shape"
                    if shape_col in df.columns:
                        try:
                            # BUG FIX: ast.literal_eval instead of eval() —
                            # the shape string comes from data and must
                            # never be executed as code.
                            shape = ast.literal_eval(df[shape_col].iloc[0])
                            mat_dict[base_var] = df[col].values.reshape(shape)
                            continue
                        except Exception:
                            pass  # fall through and store the flat column

                # One column of an expanded 2-D array: rebuild the full
                # array when visiting its first column.
                col_match = re.match(r'(.+)_col(\d+)$', col)
                if col_match:
                    base_var = col_match.group(1)
                    related_cols = [c for c in df.columns if c.startswith(f"{base_var}_col")]
                    if col == related_cols[0]:
                        n_cols = len(related_cols)
                        mat_dict[base_var] = np.column_stack(
                            [df[f"{base_var}_col{i+1}"].values for i in range(n_cols)]
                        )
                    continue

                # Regular column.
                data = df[col].values
                if data.dtype == object:
                    # Encode pure-string columns as UTF-8 bytes for MATLAB.
                    if all(isinstance(x, str) for x in data):
                        data = np.array([x.encode('utf-8') for x in data], dtype=object)
                mat_dict[clean_col] = data

            # Provenance metadata.
            mat_dict['__version__'] = 'MCP_1.0'
            mat_dict['__timestamp__'] = str(datetime.now())

            scipy.io.savemat(file_path, mat_dict)

        elif file_ext == '.parquet':
            df.to_parquet(file_path, index=False)

        elif file_ext == '.feather':
            df.to_feather(file_path)

        elif file_ext in ['.h5', '.hdf5']:
            key = kwargs.get('key', 'data')
            df.to_hdf(file_path, key=key, mode='w')

        elif file_ext in ['.pkl', '.pickle']:
            with open(file_path, 'wb') as f:
                pickle.dump(df, f)

        else:
            raise ValueError(f"不支持的输出格式: {file_ext}")

        return True

    except Exception as e:
        print(f"保存文件失败 ({file_path}): {str(e)}")
        return False

# ==================== 数据清洗服务 ====================

class DataCleaner:
    """Outlier-detection helpers used by the cleaning tools."""

    @staticmethod
    def detect_outliers_zscore(data: pd.Series, threshold: float = 3.0) -> pd.Series:
        """Flag values whose absolute Z-score exceeds `threshold`.

        BUG FIX: the previous version returned a bare ndarray computed over
        the NaN-dropped values, so positions no longer lined up with the
        input when NaNs were present (and the annotated return type was
        wrong). It now returns a boolean Series aligned to `data.index`
        (NaN rows are False), consistent with the other detectors.

        Args:
            data: Numeric series to inspect.
            threshold: Z-score cut-off above which a value is an outlier.

        Returns:
            Boolean Series on `data.index`; True marks an outlier.
        """
        non_null = data.dropna()
        if non_null.empty:
            return pd.Series(False, index=data.index)
        z_scores = np.abs(np.asarray(stats.zscore(non_null)))
        flags = pd.Series(z_scores > threshold, index=non_null.index)
        return flags.reindex(data.index, fill_value=False)

    @staticmethod
    def detect_outliers_iqr(data: pd.Series, factor: float = 1.5) -> pd.Series:
        """Flag values outside [Q1 - factor*IQR, Q3 + factor*IQR].

        Args:
            data: Numeric series to inspect.
            factor: IQR multiplier (1.5 is the Tukey convention).

        Returns:
            Boolean Series on `data.index`; True marks an outlier.
        """
        q1 = data.quantile(0.25)
        q3 = data.quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - factor * iqr
        upper_bound = q3 + factor * iqr
        return (data < lower_bound) | (data > upper_bound)

    @staticmethod
    def detect_outliers_isolation_forest(data: pd.DataFrame, contamination: float = 0.1) -> pd.Series:
        """Flag outlier rows using an IsolationForest over the numeric columns.

        NaNs are imputed with column means before fitting; returns all-False
        when there are no numeric columns. random_state is fixed for
        reproducibility.

        Args:
            data: DataFrame to inspect (non-numeric columns are ignored).
            contamination: Expected outlier fraction passed to the forest.

        Returns:
            Boolean Series on `data.index`; True marks an outlier row.
        """
        numeric_data = data.select_dtypes(include=[np.number])
        if numeric_data.empty:
            return pd.Series([False] * len(data), index=data.index)

        iso_forest = IsolationForest(contamination=contamination, random_state=42)
        predictions = iso_forest.fit_predict(numeric_data.fillna(numeric_data.mean()))
        return pd.Series(predictions == -1, index=data.index)
@mcp.tool()
def clean_missing_values(
    file_path: str,
    strategy: str = "drop",
    fill_value: Optional[Union[str, float]] = None,
    columns: Optional[List[str]] = None,
    output_path: Optional[str] = None
) -> str:
    """Handle missing values in a data file.

    Args:
        file_path: Input file path (any format read_data_file supports).
        strategy: One of drop, mean, median, mode, forward_fill,
            backward_fill, constant.
        fill_value: Fill value, required when strategy is "constant".
        columns: Columns to process; None means all columns.
        output_path: Output file path; None overwrites the input file.

    Returns:
        A human-readable report, or an error message string on failure.
    """
    try:
        df = read_data_file(file_path)

        original_shape = df.shape
        missing_before = df.isnull().sum().sum()

        # Restrict to requested columns that actually exist.
        if columns:
            target_columns = [col for col in columns if col in df.columns]
        else:
            target_columns = df.columns.tolist()

        if strategy == "drop":
            df = df.dropna(subset=target_columns)
        elif strategy in ("mean", "median"):
            # BUG FIX: use is_numeric_dtype (covers float32/int32 etc., not
            # just int64/float64) and plain assignment instead of the
            # deprecated inplace fillna on a column slice.
            for col in target_columns:
                if pd.api.types.is_numeric_dtype(df[col]):
                    value = df[col].mean() if strategy == "mean" else df[col].median()
                    df[col] = df[col].fillna(value)
        elif strategy == "mode":
            for col in target_columns:
                mode_value = df[col].mode()
                if not mode_value.empty:
                    df[col] = df[col].fillna(mode_value[0])
        elif strategy == "forward_fill":
            # BUG FIX: fillna(method=...) is deprecated and removed in
            # pandas 3; use the dedicated ffill/bfill methods.
            df[target_columns] = df[target_columns].ffill()
        elif strategy == "backward_fill":
            df[target_columns] = df[target_columns].bfill()
        elif strategy == "constant":
            if fill_value is None:
                return "错误: 使用constant策略时必须提供fill_value"
            df[target_columns] = df[target_columns].fillna(fill_value)
        else:
            return f"错误: 不支持的策略 {strategy}"

        missing_after = df.isnull().sum().sum()

        output_file = output_path or file_path
        save_data_file(df, output_file)

        return f"""缺失值处理完成:
- 原始数据形状: {original_shape}
- 处理后数据形状: {df.shape}
- 处理前缺失值: {missing_before}
- 处理后缺失值: {missing_after}
- 处理策略: {strategy}
- 输出文件: {output_file}"""

    except Exception as e:
        return f"处理失败: {str(e)}"

@mcp.tool()
def remove_duplicates(
    file_path: str,
    subset: Optional[List[str]] = None,
    keep: str = "first",
    output_path: Optional[str] = None
) -> str:
    """Remove duplicate rows from a data file.

    Args:
        file_path: Input file path (any format read_data_file supports).
        subset: Columns used to decide what counts as a duplicate;
            None compares whole rows.
        keep: "first" or "last" keeps that occurrence; any other value
            (e.g. "False") drops every duplicate.
        output_path: Output file path; None overwrites the input file.

    Returns:
        A human-readable report, or an error message string on failure.
    """
    try:
        df = read_data_file(file_path)

        original_shape = df.shape
        duplicates_before = df.duplicated(subset=subset).sum()

        # BUG FIX: `keep` arrives as a string over MCP, but pandas expects
        # the literal False (not the string "False") to drop all duplicates.
        keep_arg: Union[str, bool] = keep if keep in ("first", "last") else False

        df_cleaned = df.drop_duplicates(subset=subset, keep=keep_arg)

        duplicates_removed = original_shape[0] - len(df_cleaned)

        output_file = output_path or file_path
        save_data_file(df_cleaned, output_file)

        return f"""重复值处理完成:
- 原始数据形状: {original_shape}
- 处理后数据形状: {df_cleaned.shape}
- 重复行数: {duplicates_before}
- 去除重复行数: {duplicates_removed}
- 保留策略: {keep}
- 输出文件: {output_file}"""

    except Exception as e:
        return f"处理失败: {str(e)}"

# ==================== 数据分析服务 ====================

class DataAnalyzer:
    """Builds statistical profiles of DataFrames."""

    def __init__(self):
        # Reserved cache for analysis results.
        self.analysis_cache = {}

    def descriptive_statistics(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Produce a descriptive-statistics report for `df`.

        Returns:
            Dict with per-column numeric stats ("numeric_analysis"),
            per-column categorical stats ("categorical_analysis"), and
            dataset-level counts ("overall_summary").
        """
        num_cols = df.select_dtypes(include=[np.number]).columns
        cat_cols = df.select_dtypes(include=['object', 'category']).columns
        n_rows = len(df)

        def _numeric_profile(col):
            # Stats over non-null values; None when the column is all-NaN.
            values = df[col].dropna()
            if values.empty:
                return None
            n_missing = int(df[col].isnull().sum())
            return {
                "count": int(len(values)),
                "mean": float(values.mean()),
                "median": float(values.median()),
                "std": float(values.std()),
                "min": float(values.min()),
                "max": float(values.max()),
                "q25": float(values.quantile(0.25)),
                "q75": float(values.quantile(0.75)),
                "skewness": float(stats.skew(values)),
                "kurtosis": float(stats.kurtosis(values)),
                "missing_count": n_missing,
                "missing_percentage": float(n_missing / n_rows * 100),
            }

        def _categorical_profile(col):
            # Frequency-based stats; None when the column is all-NaN.
            values = df[col].dropna()
            if values.empty:
                return None
            freq = values.value_counts()
            n_missing = int(df[col].isnull().sum())
            return {
                "count": int(len(values)),
                "unique_values": int(values.nunique()),
                "most_frequent": str(freq.index[0]) if len(freq) > 0 else None,
                "most_frequent_count": int(freq.iloc[0]) if len(freq) > 0 else 0,
                "missing_count": n_missing,
                "missing_percentage": float(n_missing / n_rows * 100),
                "top_values": {str(k): int(v) for k, v in freq.head(10).items()},
            }

        numeric_analysis = {}
        for col in num_cols:
            profile = _numeric_profile(col)
            if profile is not None:
                numeric_analysis[col] = profile

        categorical_analysis = {}
        for col in cat_cols:
            profile = _categorical_profile(col)
            if profile is not None:
                categorical_analysis[col] = profile

        return {
            "numeric_analysis": numeric_analysis,
            "categorical_analysis": categorical_analysis,
            "overall_summary": {
                "total_rows": n_rows,
                "total_columns": len(df.columns),
                "numeric_columns": len(num_cols),
                "categorical_columns": len(cat_cols),
                "missing_values_total": int(df.isnull().sum().sum()),
            },
        }

@mcp.tool()
def analyze_descriptive_statistics(
    file_path: str,
    output_path: Optional[str] = None
) -> str:
    """Run descriptive-statistics analysis on a data file.

    Args:
        file_path: Input file path (any format read_data_file supports).
        output_path: Optional path for a full JSON report.

    Returns:
        A human-readable summary, or an error message string on failure.
    """
    try:
        frame = read_data_file(file_path)
        results = DataAnalyzer().descriptive_statistics(frame)

        # Persist the full report as JSON when a target path was given.
        if output_path:
            report = {
                "file_path": file_path,
                "analysis_time": datetime.now().isoformat(),
                "analysis_type": "descriptive_statistics",
                "results": results,
            }
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(report, f, indent=2, ensure_ascii=False)

        overview = results['overall_summary']
        summary = f"""描述性统计分析完成:
- 数据形状: {overview['total_rows']} 行 × {overview['total_columns']} 列
- 数值列: {overview['numeric_columns']} 个
- 分类列: {overview['categorical_columns']} 个
- 缺失值总数: {overview['missing_values_total']}"""

        if output_path:
            summary += f"\n- 详细报告: {output_path}"

        return summary

    except Exception as e:
        return f"描述性统计分析失败: {str(e)}"

# ==================== 数据验证服务 ====================

class DataValidator:
    """Computes data-quality metrics for DataFrames."""

    def __init__(self):
        # Reserved for user-registered validation rules and cached metrics.
        self.validation_rules = {}
        self.quality_metrics = {}

    def calculate_quality_metrics(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Calculate data-quality metrics for `df`.

        Args:
            df: DataFrame to profile.

        Returns:
            Dict with row/column counts, per-column missing-value stats,
            duplicate-row count, dtypes, memory usage in MB, and a 0-100
            "quality_score".
        """
        n_rows = len(df)
        n_cells = n_rows * len(df.columns)

        metrics = {
            "total_rows": n_rows,
            "total_columns": len(df.columns),
            "missing_values": {},
            "duplicate_rows": int(df.duplicated().sum()),
            "data_types": {},
            "memory_usage": float(df.memory_usage(deep=True).sum() / 1024 / 1024)  # MB
        }

        # Per-column missing-value stats and dtypes.
        # BUG FIX: guard the percentage against division by zero on an
        # empty DataFrame.
        for column in df.columns:
            missing_count = int(df[column].isnull().sum())
            metrics["missing_values"][column] = {
                "count": missing_count,
                "percentage": float(missing_count / n_rows * 100) if n_rows else 0.0
            }
            metrics["data_types"][column] = str(df[column].dtype)

        # Overall quality score: start at 100, penalize missing values
        # (weight 50) and duplicate rows (weight 30), clamp at 0.
        # BUG FIX: missing_rate previously divided by rows*columns without
        # an emptiness guard.
        missing_rate = df.isnull().sum().sum() / n_cells if n_cells else 0.0
        duplicate_rate = metrics["duplicate_rows"] / n_rows if n_rows else 0.0

        metrics["quality_score"] = float(max(0, 100 - (missing_rate * 50) - (duplicate_rate * 30)))

        return metrics

@mcp.tool()
def validate_data_quality(
    file_path: str,
    output_path: Optional[str] = None
) -> str:
    """Check the quality of a data file.

    Args:
        file_path: Input file path (any format read_data_file supports).
        output_path: Optional path for a full JSON report.

    Returns:
        A human-readable quality summary, or an error message string.
    """
    try:
        frame = read_data_file(file_path)
        metrics = DataValidator().calculate_quality_metrics(frame)

        # Build actionable recommendations from the metrics.
        recommendations = []

        if metrics["duplicate_rows"] > 0:
            recommendations.append(f"发现 {metrics['duplicate_rows']} 行重复数据，建议去重")

        high_missing_columns = [
            f"{col} ({info['percentage']:.1f}%)"
            for col, info in metrics["missing_values"].items()
            if info["percentage"] > 50
        ]
        if high_missing_columns:
            recommendations.append(f"以下列缺失值过多: {', '.join(high_missing_columns)}")

        if metrics["quality_score"] < 70:
            recommendations.append("数据质量较低，建议进行清洗")

        # Assemble the detailed report.
        report = {
            "file_info": {
                "file_path": file_path,
                "file_size_mb": float(Path(file_path).stat().st_size / 1024 / 1024),
                "validation_time": datetime.now().isoformat()
            },
            "data_overview": {
                "rows": metrics["total_rows"],
                "columns": metrics["total_columns"],
                "memory_usage_mb": metrics["memory_usage"]
            },
            "quality_metrics": metrics,
            "recommendations": recommendations
        }

        if output_path:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(report, f, indent=2, ensure_ascii=False)

        summary = f"""数据质量检查完成:
- 数据形状: {metrics['total_rows']} 行 × {metrics['total_columns']} 列
- 质量评分: {metrics['quality_score']:.1f}/100
- 缺失值总数: {sum(info['count'] for info in metrics['missing_values'].values())}
- 重复行数: {metrics['duplicate_rows']}
- 内存使用: {metrics['memory_usage']:.2f} MB
- 建议数量: {len(recommendations)}"""

        if output_path:
            summary += f"\n- 详细报告: {output_path}"

        return summary

    except Exception as e:
        return f"数据质量检查失败: {str(e)}"

# ==================== 数据标准化服务 ====================

class DataNormalizer:
    """Fits sklearn scalers and applies them to DataFrames."""

    def __init__(self):
        # Reserved registries for configured and fitted scalers.
        self.scalers = {}
        self.fitted_scalers = {}

    def fit_transform(self, data: pd.DataFrame, method: str, **kwargs) -> Tuple[pd.DataFrame, Any]:
        """Fit the scaler selected by `method` and transform `data`.

        Supported methods: zscore, minmax, maxabs, robust, unit_vector,
        power. Method-specific options are read from **kwargs
        (feature_range, quantile_range, norm, power_method).

        Returns:
            The transformed DataFrame (same columns and index) and the
            fitted scaler.

        Raises:
            ValueError: if `method` is not one of the supported names.
        """
        # Dispatch table: method name -> zero-argument scaler factory.
        builders = {
            "zscore": lambda: StandardScaler(),
            "minmax": lambda: MinMaxScaler(feature_range=kwargs.get('feature_range', (0, 1))),
            "maxabs": lambda: MaxAbsScaler(),
            "robust": lambda: RobustScaler(quantile_range=kwargs.get('quantile_range', (25.0, 75.0))),
            "unit_vector": lambda: Normalizer(norm=kwargs.get('norm', 'l2')),
            "power": lambda: PowerTransformer(method=kwargs.get('power_method', 'yeo-johnson')),
        }

        if method not in builders:
            raise ValueError(f"不支持的标准化方法: {method}")

        scaler = builders[method]()
        values = scaler.fit_transform(data)
        transformed_df = pd.DataFrame(values, columns=data.columns, index=data.index)

        return transformed_df, scaler

@mcp.tool()
def zscore_normalize(
    file_path: str,
    columns: Optional[List[str]] = None,
    output_path: Optional[str] = None,
    save_scaler: bool = False,
    scaler_path: Optional[str] = None
) -> str:
    """Apply Z-score standardization to numeric columns of a data file.

    Args:
        file_path: Input file path (any format read_data_file supports).
        columns: Columns to normalize; None means every numeric column.
        output_path: Output file path; None overwrites the input file.
        save_scaler: Whether to persist the fitted scaler with pickle.
        scaler_path: Where to save the scaler when save_scaler is True.

    Returns:
        A human-readable report, or an error message string on failure.
    """
    try:
        frame = read_data_file(file_path)

        # Restrict the request to columns that are actually numeric.
        numeric_cols = frame.select_dtypes(include=[np.number]).columns.tolist()
        if columns:
            target_columns = [c for c in columns if c in numeric_cols]
        else:
            target_columns = numeric_cols

        if not target_columns:
            return "错误: 没有找到可用的数值列"

        # Standardize the selected columns in a copy of the frame.
        transformed, scaler = DataNormalizer().fit_transform(
            frame[target_columns], "zscore"
        )
        result = frame.copy()
        result[target_columns] = transformed

        # Per-column before/after statistics for the report.
        stats_info = {
            col: {
                "original_mean": float(frame[col].mean()),
                "original_std": float(frame[col].std()),
                "normalized_mean": float(result[col].mean()),
                "normalized_std": float(result[col].std()),
            }
            for col in target_columns
        }

        output_file = output_path or file_path
        save_data_file(result, output_file)

        if save_scaler and scaler_path:
            with open(scaler_path, 'wb') as f:
                pickle.dump(scaler, f)

        # Assemble the textual report.
        report = f"Z-score标准化完成:\n"
        report += f"- 处理列数: {len(target_columns)}\n"
        report += f"- 处理列: {', '.join(target_columns)}\n"
        for col, col_stats in stats_info.items():
            report += f"- {col}: 均值 {col_stats['original_mean']:.3f}→{col_stats['normalized_mean']:.3f}, 标准差 {col_stats['original_std']:.3f}→{col_stats['normalized_std']:.3f}\n"
        report += f"- 输出文件: {output_file}"

        if save_scaler and scaler_path:
            report += f"\n- 标准化器已保存: {scaler_path}"

        return report

    except Exception as e:
        return f"Z-score标准化失败: {str(e)}"

@mcp.tool()
def minmax_normalize(
    file_path: str,
    columns: Optional[List[str]] = None,
    feature_range: Tuple[float, float] = (0, 1),
    output_path: Optional[str] = None,
    save_scaler: bool = False,
    scaler_path: Optional[str] = None
) -> str:
    """Apply Min-Max scaling to numeric columns of a data file.

    Args:
        file_path: Path of the input data file.
        columns: Column names to scale; None means every numeric column.
        feature_range: Target (min, max) range, defaults to (0, 1).
        output_path: Where to write the scaled data; defaults to file_path
            (i.e. the input file is overwritten in place).
        save_scaler: Whether to persist the fitted scaler with pickle.
        scaler_path: Destination for the pickled scaler.

    Returns:
        A human-readable summary of the scaling run, or an error message.
    """
    try:
        df = read_data_file(file_path)

        # Only numeric columns can be scaled; requested non-numeric names
        # are silently dropped, matching the other normalization tools.
        numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        target_columns = (
            [c for c in columns if c in numeric_cols] if columns else numeric_cols
        )

        if not target_columns:
            return "错误: 没有找到可用的数值列"

        # Fit the scaler on the selected columns and write them into a copy.
        df_normalized = df.copy()
        scaled_values, scaler = DataNormalizer().fit_transform(
            df[target_columns], "minmax", feature_range=feature_range
        )
        df_normalized[target_columns] = scaled_values

        # Per-column range before and after scaling, for the report.
        stats_info = {
            col: {
                "original_min": float(df[col].min()),
                "original_max": float(df[col].max()),
                "normalized_min": float(df_normalized[col].min()),
                "normalized_max": float(df_normalized[col].max()),
            }
            for col in target_columns
        }

        output_file = output_path or file_path
        save_data_file(df_normalized, output_file)

        # Persist the scaler only when both the flag and a path are given.
        if save_scaler and scaler_path:
            with open(scaler_path, 'wb') as f:
                pickle.dump(scaler, f)

        lines = [
            "Min-Max标准化完成:",
            f"- 处理列数: {len(target_columns)}",
            f"- 处理列: {', '.join(target_columns)}",
            f"- 特征范围: {feature_range}",
        ]
        for col, info in stats_info.items():
            lines.append(
                f"- {col}: 范围 [{info['original_min']:.3f}, {info['original_max']:.3f}]→[{info['normalized_min']:.3f}, {info['normalized_max']:.3f}]"
            )
        lines.append(f"- 输出文件: {output_file}")
        report = "\n".join(lines)

        if save_scaler and scaler_path:
            report += f"\n- 标准化器已保存: {scaler_path}"

        return report

    except Exception as e:
        return f"Min-Max标准化失败: {str(e)}"

@mcp.tool()
def robust_normalize(
    file_path: str,
    columns: Optional[List[str]] = None,
    quantile_range: Tuple[float, float] = (25.0, 75.0),
    output_path: Optional[str] = None,
    save_scaler: bool = False,
    scaler_path: Optional[str] = None
) -> str:
    """Apply robust (median/IQR based) scaling to numeric columns.

    Args:
        file_path: Path of the input data file.
        columns: Column names to scale; None means every numeric column.
        quantile_range: Quantile pair used by the scaler, default (25.0, 75.0).
        output_path: Where to write the scaled data; defaults to file_path
            (i.e. the input file is overwritten in place).
        save_scaler: Whether to persist the fitted scaler with pickle.
        scaler_path: Destination for the pickled scaler.

    Returns:
        A human-readable summary of the scaling run, or an error message.
    """
    try:
        df = read_data_file(file_path)

        # Restrict scaling to numeric columns; invalid requested names are
        # silently ignored, as in the sibling normalization tools.
        numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        target_columns = (
            [c for c in columns if c in numeric_cols] if columns else numeric_cols
        )

        if not target_columns:
            return "错误: 没有找到可用的数值列"

        df_normalized = df.copy()
        scaled_values, scaler = DataNormalizer().fit_transform(
            df[target_columns], "robust", quantile_range=quantile_range
        )
        df_normalized[target_columns] = scaled_values

        # Median and interquartile range before/after, per column.
        stats_info = {}
        for col in target_columns:
            q25, q75 = df[col].quantile(0.25), df[col].quantile(0.75)
            norm_iqr = (
                df_normalized[col].quantile(0.75) - df_normalized[col].quantile(0.25)
            )
            stats_info[col] = {
                "original_median": float(df[col].median()),
                "original_iqr": float(q75 - q25),
                "normalized_median": float(df_normalized[col].median()),
                "normalized_iqr": float(norm_iqr),
            }

        output_file = output_path or file_path
        save_data_file(df_normalized, output_file)

        # Persist the scaler only when both the flag and a path are given.
        if save_scaler and scaler_path:
            with open(scaler_path, 'wb') as f:
                pickle.dump(scaler, f)

        lines = [
            "Robust标准化完成:",
            f"- 处理列数: {len(target_columns)}",
            f"- 处理列: {', '.join(target_columns)}",
            f"- 分位数范围: {quantile_range}",
        ]
        for col, info in stats_info.items():
            lines.append(
                f"- {col}: 中位数 {info['original_median']:.3f}→{info['normalized_median']:.3f}, IQR {info['original_iqr']:.3f}→{info['normalized_iqr']:.3f}"
            )
        lines.append(f"- 输出文件: {output_file}")
        report = "\n".join(lines)

        if save_scaler and scaler_path:
            report += f"\n- 标准化器已保存: {scaler_path}"

        return report

    except Exception as e:
        return f"Robust标准化失败: {str(e)}"

@mcp.tool()
def unit_vector_normalize(
    file_path: str,
    columns: Optional[List[str]] = None,
    norm: str = 'l2',
    output_path: Optional[str] = None,
    save_scaler: bool = False,
    scaler_path: Optional[str] = None
) -> str:
    """Apply unit-vector normalization to the selected numeric columns.

    Args:
        file_path: Path of the input data file.
        columns: Column names to normalize; None means every numeric column.
        norm: Norm type passed through to the normalizer: 'l1', 'l2' or 'max'.
        output_path: Where to write the result; defaults to file_path
            (i.e. the input file is overwritten in place).
        save_scaler: Whether to persist the fitted normalizer with pickle.
        scaler_path: Destination for the pickled normalizer.

    Returns:
        A human-readable summary of the run, or an error message.
    """
    try:
        df = read_data_file(file_path)

        # Only numeric columns are eligible; unknown requested names are
        # silently dropped, consistent with the other normalization tools.
        numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        target_columns = (
            [c for c in columns if c in numeric_cols] if columns else numeric_cols
        )

        if not target_columns:
            return "错误: 没有找到可用的数值列"

        df_normalized = df.copy()
        scaled_values, scaler = DataNormalizer().fit_transform(
            df[target_columns], "unit_vector", norm=norm
        )
        df_normalized[target_columns] = scaled_values

        # Euclidean norm of each column before and after, for the report.
        stats_info = {
            col: {
                "original_norm": float(np.linalg.norm(df[col].values)),
                "normalized_norm": float(np.linalg.norm(df_normalized[col].values)),
                "norm_type": norm,
            }
            for col in target_columns
        }

        output_file = output_path or file_path
        save_data_file(df_normalized, output_file)

        # Persist the normalizer only when both the flag and a path are given.
        if save_scaler and scaler_path:
            with open(scaler_path, 'wb') as f:
                pickle.dump(scaler, f)

        lines = [
            "Unit Vector标准化完成:",
            f"- 处理列数: {len(target_columns)}",
            f"- 处理列: {', '.join(target_columns)}",
            f"- 范数类型: {norm}",
        ]
        for col, info in stats_info.items():
            lines.append(
                f"- {col}: 原始范数 {info['original_norm']:.3f}→标准化范数 {info['normalized_norm']:.3f}"
            )
        lines.append(f"- 输出文件: {output_file}")
        report = "\n".join(lines)

        if save_scaler and scaler_path:
            report += f"\n- 标准化器已保存: {scaler_path}"

        return report

    except Exception as e:
        return f"Unit Vector标准化失败: {str(e)}"

# ==================== 数据可视化服务 ====================

class DataVisualizer:
    """Chart builder that renders matplotlib figures to base64 PNG strings."""

    def __init__(self):
        # Shared defaults for every chart produced by this instance.
        self.color_palette = px.colors.qualitative.Set3
        self.figure_size = (12, 8)

    def _save_plot_as_base64(self) -> str:
        """Serialize the active matplotlib figure to a base64-encoded PNG."""
        buf = BytesIO()
        plt.savefig(buf, format='png', dpi=300, bbox_inches='tight')
        buf.seek(0)
        encoded = base64.b64encode(buf.getvalue()).decode()
        # Close the figure to release matplotlib's memory for it.
        plt.close()
        return encoded

    def create_histogram(self, df: pd.DataFrame, column: str, bins: int = 30) -> str:
        """Draw a histogram of one column and return it as a base64 PNG."""
        plt.figure(figsize=self.figure_size)

        values = df[column].dropna()
        plt.hist(values, bins=bins, alpha=0.7, color='skyblue', edgecolor='black')
        plt.title(f'{column} 分布直方图', fontsize=16)
        plt.xlabel(column, fontsize=12)
        plt.ylabel('频次', fontsize=12)
        plt.grid(True, alpha=0.3)

        # Overlay labelled mean/median reference lines.
        mean_val = values.mean()
        median_val = values.median()
        plt.axvline(mean_val, color='red', linestyle='--', label=f'均值: {mean_val:.2f}')
        plt.axvline(median_val, color='green', linestyle='--', label=f'中位数: {median_val:.2f}')
        plt.legend()

        return self._save_plot_as_base64()

@mcp.tool()
def create_histogram(
    file_path: str,
    column: str,
    bins: int = 30,
    output_path: Optional[str] = None
) -> str:
    """Create a histogram for one numeric column of a data file.

    Args:
        file_path: Path of the input data file.
        column: Name of the column to plot.
        bins: Number of histogram bins.
        output_path: Optional path for saving the rendered PNG.

    Returns:
        A textual summary of the chart, or an error message.
    """
    try:
        df = read_data_file(file_path)

        # Validate the requested column before handing it to the plotter.
        if column not in df.columns:
            return f"错误: 列 '{column}' 不存在"
        if not pd.api.types.is_numeric_dtype(df[column]):
            return f"错误: 列 '{column}' 不是数值类型"

        image_base64 = DataVisualizer().create_histogram(df, column, bins)

        # Persist the PNG when a destination was given.
        if output_path:
            with open(output_path, 'wb') as f:
                f.write(base64.b64decode(image_base64))

        # Summary statistics over the non-null values actually plotted.
        data = df[column].dropna()
        stats_info = f"""直方图创建完成:
- 列名: {column}
- 数据点数: {len(data)}
- 箱数: {bins}
- 均值: {data.mean():.2f}
- 中位数: {data.median():.2f}
- 标准差: {data.std():.2f}"""

        if output_path:
            stats_info += f"\n- 图片保存至: {output_path}"

        return stats_info

    except Exception as e:
        return f"直方图创建失败: {str(e)}"

# ==================== 数据导出服务 ====================

class DataExporter:
    """Export pandas DataFrames to the formats listed in ``supported_formats``.

    Fixes over the previous revision: ``_export_by_format`` now implements
    every advertised format (parquet/sqlite/html/xml were listed but
    rejected with ValueError), exception chaining preserves root causes,
    and the bare ``except:`` in column sizing is narrowed.
    """
    
    def __init__(self):
        # Formats handled by _export_by_format; compression list is advisory.
        self.supported_formats = ['csv', 'xlsx', 'json', 'parquet', 'sqlite', 'html', 'xml']
        self.compression_formats = ['zip', 'gzip', 'bz2']
    
    def export_to_csv(self, df: pd.DataFrame, output_path: str, **kwargs) -> bool:
        """Write df as UTF-8-BOM CSV; extra kwargs pass through to ``to_csv``."""
        try:
            df.to_csv(output_path, index=False, encoding='utf-8-sig', **kwargs)
            return True
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise Exception(f"CSV导出失败: {str(e)}") from e
    
    def export_to_excel(self, df: pd.DataFrame, output_path: str, sheet_name: str = 'Sheet1', 
                       with_formatting: bool = True) -> bool:
        """Write df to .xlsx, optionally with a styled header and sized columns."""
        try:
            if with_formatting:
                wb = Workbook()
                ws = wb.active
                ws.title = sheet_name
                
                # Stream the DataFrame into the sheet, header row first.
                for r in dataframe_to_rows(df, index=False, header=True):
                    ws.append(r)
                
                # Header style: white bold text on a blue fill, centered.
                header_font = Font(bold=True, color="FFFFFF")
                header_fill = PatternFill(start_color="366092", end_color="366092", fill_type="solid")
                header_alignment = Alignment(horizontal="center", vertical="center")
                
                for cell in ws[1]:
                    cell.font = header_font
                    cell.fill = header_fill
                    cell.alignment = header_alignment
                
                # Auto-size each column to its longest cell, capped at 50 chars.
                for column in ws.columns:
                    max_length = 0
                    column_letter = column[0].column_letter
                    for cell in column:
                        try:
                            max_length = max(max_length, len(str(cell.value)))
                        except Exception:
                            pass  # unprintable value: keep the current width
                    ws.column_dimensions[column_letter].width = min(max_length + 2, 50)
                
                wb.save(output_path)
            else:
                df.to_excel(output_path, sheet_name=sheet_name, index=False)
            
            return True
        except Exception as e:
            raise Exception(f"Excel导出失败: {str(e)}") from e
    
    def _export_by_format(self, df: pd.DataFrame, output_path: str, format_type: str,
                          table_name: str = 'data') -> bool:
        """Dispatch to the writer for ``format_type``.

        Args:
            df: The data to export.
            output_path: Destination file path.
            format_type: One of ``supported_formats``.
            table_name: Table name used for the sqlite format only.

        Returns:
            True on success.

        Raises:
            ValueError: If ``format_type`` is not a supported format.
        """
        if format_type == 'csv':
            return self.export_to_csv(df, output_path)
        elif format_type == 'xlsx':
            return self.export_to_excel(df, output_path)
        elif format_type == 'json':
            df.to_json(output_path, orient='records', force_ascii=False, indent=2)
            return True
        elif format_type == 'parquet':
            # Requires a parquet engine (pyarrow/fastparquet) at runtime.
            df.to_parquet(output_path, index=False)
            return True
        elif format_type == 'sqlite':
            conn = sqlite3.connect(output_path)
            try:
                df.to_sql(table_name, conn, if_exists='replace', index=False)
            finally:
                conn.close()
            return True
        elif format_type == 'html':
            df.to_html(output_path, index=False)
            return True
        elif format_type == 'xml':
            # parser="etree" avoids a hard dependency on lxml.
            df.to_xml(output_path, index=False, parser="etree")
            return True
        else:
            raise ValueError(f"不支持的导出格式: {format_type}")

@mcp.tool()
def export_data(
    file_path: str,
    output_path: str,
    format_type: str = "csv",
    sheet_name: Optional[str] = None
) -> str:
    """Export a data file to another format.

    The actual writing is delegated to ``save_data_file``; the previous
    revision also instantiated a ``DataExporter`` it never used, which has
    been removed.

    Args:
        file_path: Input file path.
        output_path: Output file path.
        format_type: Export format (csv, xlsx, json).
        sheet_name: Excel worksheet name (only used for xlsx).

    Returns:
        A textual export report, or an error message.
    """
    try:
        df = read_data_file(file_path)

        # Make sure the destination directory exists before writing.
        output_dir = os.path.dirname(output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        # Only the xlsx path needs the sheet name forwarded.
        if format_type == 'xlsx' and sheet_name:
            success = save_data_file(df, output_path, sheet_name=sheet_name)
        else:
            success = save_data_file(df, output_path)

        if not success:
            return "数据导出失败"

        file_size = os.path.getsize(output_path) / 1024 / 1024  # MB

        result = f"""数据导出成功:
- 输入文件: {file_path}
- 输出文件: {output_path}
- 导出格式: {format_type.upper()}
- 数据行数: {len(df)}
- 数据列数: {len(df.columns)}
- 文件大小: {file_size:.2f} MB"""

        if sheet_name:
            result += f"\n- 工作表名: {sheet_name}"

        return result

    except Exception as e:
        return f"数据导出失败: {str(e)}"

# ==================== 综合处理工具 ====================

@mcp.tool()
def process_high_dim_mat(
    file_path: str,
    output_dir: str,
    process_type: str = "convert",
    channel_prefix: str = "channel",
    validate_output: bool = True
) -> str:
    """Process a high-dimensional MATLAB .mat file.

    Args:
        file_path: Input .mat file path.
        output_dir: Directory that receives the generated CSV files.
        process_type: "convert" (write CSVs) or "analyze" (report statistics).
        channel_prefix: Filename prefix for per-channel CSVs of 3D arrays.
        validate_output: Whether to verify that the output files were written.

    Returns:
        A human-readable report, or an error message on failure.
    """
    try:
        os.makedirs(output_dir, exist_ok=True)

        # Prefer scipy.io (classic MAT format); fall back to h5py for
        # MATLAB v7.3 (HDF5-based) files. If either optional library failed
        # to import at startup it is None here, and the resulting
        # AttributeError is reported like any other read error.
        try:
            mat_data = scipy.io.loadmat(file_path)
        except Exception as e:
            try:
                with h5py.File(file_path, 'r') as f:
                    mat_data = {k: np.array(v) for k, v in f.items()}
            except Exception as h5_e:
                return f"无法读取MAT文件: scipy.io错误: {str(e)}; h5py错误: {str(h5_e)}"

        # Strip MATLAB metadata entries (__header__, __version__, ...).
        data_vars = {k: v for k, v in mat_data.items() if not k.startswith('__')}
        if not data_vars:
            return "文件中没有找到有效数据变量"

        results = []
        processed_files = []

        for var_name, var_data in data_vars.items():
            if not isinstance(var_data, np.ndarray):
                continue

            if process_type == "convert":
                if var_data.ndim == 2:
                    # A 2D array maps directly onto one CSV.
                    output_path = os.path.join(output_dir, f"{var_name}.csv")
                    pd.DataFrame(var_data).to_csv(output_path, index=False)
                    processed_files.append(output_path)
                    results.append(f"2D数组 {var_name} 已保存为: {output_path}")

                elif var_data.ndim == 3:
                    # A 3D array is split along its last axis: one CSV per channel.
                    for i in range(var_data.shape[2]):
                        output_path = os.path.join(output_dir, f"{var_name}_{channel_prefix}_{i}.csv")
                        pd.DataFrame(var_data[:, :, i]).to_csv(output_path, index=False)
                        processed_files.append(output_path)
                    results.append(f"3D数组 {var_name} 的 {var_data.shape[2]} 个通道已保存")

                else:
                    results.append(f"跳过 {var_data.ndim}D 数组 {var_name}")

            elif process_type == "analyze":
                results.append(f"\n变量 {var_name}:")
                results.append(f"- 形状: {var_data.shape}")
                results.append(f"- 维度: {var_data.ndim}")
                results.append(f"- 数据类型: {var_data.dtype}")
                results.append(f"- 元素总数: {var_data.size}")

                # Numeric statistics only make sense for numeric dtypes.
                if var_data.size > 0 and np.issubdtype(var_data.dtype, np.number):
                    results.append(f"- 最小值: {np.min(var_data)}")
                    results.append(f"- 最大值: {np.max(var_data)}")
                    results.append(f"- 平均值: {np.mean(var_data)}")
                    results.append(f"- 标准差: {np.std(var_data)}")
                    results.append(f"- 非零元素: {np.count_nonzero(var_data)}")
                    results.append(f"- 唯一值数量: {len(np.unique(var_data))}")

        # Verify the generated files. Bug fix: the previous loop variable was
        # named `file_path`, clobbering the function parameter of the same
        # name; it now uses a distinct name.
        if process_type == "convert" and validate_output and processed_files:
            validation_results = []
            for out_file in processed_files:
                if os.path.exists(out_file):
                    file_size = os.path.getsize(out_file) / 1024  # KB
                    validation_results.append(f"✓ {os.path.basename(out_file)} ({file_size:.1f} KB)")
                else:
                    validation_results.append(f"✗ {os.path.basename(out_file)} (文件缺失)")
            results.append("\n输出文件验证:")
            results.extend(validation_results)

        return "\n".join(results)

    except Exception as e:
        return f"处理失败: {str(e)}"

@mcp.tool()
def process_mat_file(
    file_path: str,
    output_path: Optional[str] = None,
    process_type: str = "analyze",
    variables: Optional[List[str]] = None,
    output_format: str = "mat"
) -> str:
    """Analyze, transform or extract variables from a MATLAB .mat file.

    Args:
        file_path: Input .mat path. Read with scipy.io.loadmat only, so
            MATLAB v7.3 (HDF5-based) files are NOT handled here.
        output_path: Output path; required for "transform" and "extract".
        process_type: One of "analyze", "transform", "extract".
        variables: Variable names to process; None processes all of them.
        output_format: Output format for "transform": "mat", "csv" or "json".

    Returns:
        A human-readable report, or an error message on failure.
    """
    try:
        # Load the MAT file; any read failure falls through to the except below.
        mat_data = scipy.io.loadmat(file_path)
        
        # Drop MATLAB metadata entries (__header__, __version__, __globals__).
        data_vars = {}
        for key, value in mat_data.items():
            if not key.startswith('__'):
                data_vars[key] = value
        
        if not data_vars:
            return "文件中没有找到有效数据"
        
        # Keep only the explicitly requested variables, when given.
        if variables:
            data_vars = {k: v for k, v in data_vars.items() if k in variables}
        
        results = []
        
        # Dispatch on the requested operation.
        if process_type == "analyze":
            # Report shape/dtype plus basic statistics for each array variable.
            for var_name, var_data in data_vars.items():
                if isinstance(var_data, np.ndarray):
                    results.append(f"\n变量 {var_name}:")
                    results.append(f"- 形状: {var_data.shape}")
                    results.append(f"- 维度: {var_data.ndim}")
                    results.append(f"- 数据类型: {var_data.dtype}")
                    
                    # NOTE(review): unlike process_high_dim_mat there is no
                    # numeric-dtype guard here; np.min/np.mean on a non-numeric
                    # array would raise and surface as "处理失败".
                    if var_data.size > 0:
                        results.append(f"- 最小值: {np.min(var_data)}")
                        results.append(f"- 最大值: {np.max(var_data)}")
                        results.append(f"- 平均值: {np.mean(var_data)}")
                        results.append(f"- 标准差: {np.std(var_data)}")
                        results.append(f"- 非零元素: {np.count_nonzero(var_data)}")
                        
        elif process_type == "transform":
            # Reshape >2D arrays to 2D and save in the requested format.
            if output_path:
                transformed_data = {}
                for var_name, var_data in data_vars.items():
                    if isinstance(var_data, np.ndarray):
                        # Collapse axes: first two become rows, the rest columns.
                        if var_data.ndim > 2:
                            rows = var_data.shape[0] * var_data.shape[1]
                            cols = np.prod(var_data.shape[2:])
                            reshaped_data = var_data.reshape(rows, cols)
                            transformed_data[var_name] = reshaped_data
                        else:
                            transformed_data[var_name] = var_data
                
                # Persist according to output_format.
                if output_format == 'mat':
                    scipy.io.savemat(output_path, transformed_data)
                    results.append(f"数据已转换并保存为MAT文件: {output_path}")
                elif output_format == 'csv':
                    for var_name, var_data in transformed_data.items():
                        # One CSV per variable, named by replacing ".mat" in
                        # output_path — assumes output_path ends in ".mat",
                        # otherwise all variables write to the same file.
                        csv_path = output_path.replace('.mat', f'_{var_name}.csv')
                        pd.DataFrame(var_data).to_csv(csv_path, index=False)
                        results.append(f"变量 {var_name} 已保存为CSV: {csv_path}")
                elif output_format == 'json':
                    json_data = {}
                    for var_name, var_data in transformed_data.items():
                        json_data[var_name] = var_data.tolist()
                    with open(output_path, 'w') as f:
                        json.dump(json_data, f)
                    results.append(f"数据已转换并保存为JSON: {output_path}")
            else:
                results.append("错误: 转换模式需要指定输出路径")
                
        elif process_type == "extract":
            # Dump each 1D/2D variable to its own CSV; skip higher dimensions.
            if output_path:
                for var_name, var_data in data_vars.items():
                    if isinstance(var_data, np.ndarray):
                        if var_data.ndim <= 2:
                            csv_path = output_path.replace('.mat', f'_{var_name}.csv')
                            pd.DataFrame(var_data).to_csv(csv_path, index=False)
                            results.append(f"变量 {var_name} 已提取到: {csv_path}")
                        else:
                            results.append(f"变量 {var_name} 维度过高 ({var_data.ndim}), 已跳过")
            else:
                results.append("错误: 提取模式需要指定输出路径")
        
        return "\n".join(results)
        
    except Exception as e:
        return f"处理失败: {str(e)}"

@mcp.tool()
def process_high_dim_data(
    file_path: str,
    output_dir: str,
    process_type: str = "analyze",
    normalize_data: bool = True,
    normalize_method: str = "zscore",
    validate_quality: bool = True,
    export_format: str = "csv"
) -> str:
    """Read a (possibly high-dimensional) data file, then optionally
    validate, normalize and export it.

    Args:
        file_path: Input file path; .mat files get dedicated 2D/3D handling.
        output_dir: Directory that receives all generated artifacts.
        process_type: "analyze" (report only) or "process" (also export data).
        normalize_data: Whether to normalize numeric columns.
        normalize_method: Method name passed to DataNormalizer.fit_transform.
        validate_quality: Whether to compute quality metrics via DataValidator.
        export_format: File extension for the exported data (e.g. "csv").

    Returns:
        A human-readable processing report, or an error message.
    """
    try:
        # Create the artifact directory up front.
        os.makedirs(output_dir, exist_ok=True)
        
        # Read the input. MAT files need special handling because they may
        # hold multiple variables and arrays with more than 2 dimensions.
        if file_path.lower().endswith('.mat'):
            df = None
            try:
                mat_data = scipy.io.loadmat(file_path)
                # Only the FIRST non-metadata variable is used; others are ignored.
                data_vars = {k: v for k, v in mat_data.items() if not k.startswith('__')}
                if data_vars:
                    first_var = next(iter(data_vars.values()))
                    if first_var.ndim == 2:
                        df = pd.DataFrame(first_var)
                    elif first_var.ndim == 3:
                        # Flatten 3D → 2D: stack the first two axes into rows.
                        rows = first_var.shape[0] * first_var.shape[1]
                        cols = first_var.shape[2]
                        df = pd.DataFrame(first_var.reshape(rows, cols))
            except Exception as e:
                # Fall back to h5py for MATLAB v7.3 (HDF5-based) files.
                try:
                    with h5py.File(file_path, 'r') as f:
                        first_key = next(iter(f.keys()))
                        data = np.array(f[first_key])
                        if data.ndim == 2:
                            df = pd.DataFrame(data)
                        elif data.ndim == 3:
                            rows = data.shape[0] * data.shape[1]
                            cols = data.shape[2]
                            df = pd.DataFrame(data.reshape(rows, cols))
                except Exception as h5_e:
                    return f"无法读取MAT文件: scipy.io错误: {str(e)}; h5py错误: {str(h5_e)}"
        else:
            # Any other extension goes through the generic reader.
            df = read_data_file(file_path)
        
        if df is None or df.empty:
            return "无法读取数据或数据为空"
            
        original_shape = df.shape
        processing_steps = []
        
        # 1. Data-quality metrics (informational; does not modify the data).
        if validate_quality:
            validator = DataValidator()
            quality_metrics = validator.calculate_quality_metrics(df)
            processing_steps.append(f"质量评分: {quality_metrics['quality_score']:.1f}/100")
            
            # Report every metric except the aggregate score itself.
            processing_steps.append("质量指标:")
            for metric, value in quality_metrics.items():
                if metric != 'quality_score':
                    processing_steps.append(f"  - {metric}: {value}")
        
        # 2. Normalize numeric columns in place.
        if normalize_data:
            numeric_columns = df.select_dtypes(include=[np.number]).columns.tolist()
            if numeric_columns:
                normalizer = DataNormalizer()
                normalized_data, scaler = normalizer.fit_transform(df[numeric_columns], normalize_method)
                df[numeric_columns] = normalized_data
                processing_steps.append(f"数据标准化: {normalize_method} 方法")
                
                # Persist the fitted scaler so the transform can be reapplied later.
                scaler_path = os.path.join(output_dir, 'scaler.pkl')
                with open(scaler_path, 'wb') as f:
                    pickle.dump(scaler, f)
                processing_steps.append(f"标准化器已保存: {scaler_path}")
        
        # 3. Export the processed data plus a JSON sidecar ("process" mode only).
        if process_type == "process":
            # Export the data itself.
            output_path = os.path.join(output_dir, f"processed_data.{export_format}")
            success = save_data_file(df, output_path)
            
            if success:
                file_size = os.path.getsize(output_path) / 1024 / 1024  # MB
                processing_steps.append(f"数据已保存: {output_path} ({file_size:.2f} MB)")
                
                # Sidecar metadata: shapes, dtypes and the steps performed.
                description = {
                    'original_shape': list(original_shape),
                    'processed_shape': list(df.shape),
                    'data_types': {col: str(dtype) for col, dtype in df.dtypes.items()},
                    'processing_steps': processing_steps
                }
                
                desc_path = os.path.join(output_dir, 'data_description.json')
                with open(desc_path, 'w', encoding='utf-8') as f:
                    json.dump(description, f, indent=2, ensure_ascii=False)
                processing_steps.append(f"数据描述已保存: {desc_path}")
            else:
                return "数据导出失败"
        
        # Build the final report.
        result = f"""高维数据处理完成:
- 原始数据形状: {original_shape}
- 处理后数据形状: {df.shape}
- 处理步骤: {len(processing_steps)} 个
"""
        
        for step in processing_steps:
            result += f"  • {step}\n"
            
        return result
        
    except Exception as e:
        return f"处理失败: {str(e)}"

@mcp.tool()
def comprehensive_data_processing(
    file_path: str,
    output_path: str,
    clean_missing: bool = True,
    missing_strategy: str = "drop",
    remove_duplicates_flag: bool = True,
    normalize_data: bool = False,
    normalize_method: str = "zscore",
    validate_quality: bool = True,
    export_format: str = "csv"
) -> str:
    """Run a quality-check → clean → deduplicate → normalize → export pipeline.

    Args:
        file_path: Input file path.
        output_path: Output file path.
        clean_missing: Whether to handle missing values.
        missing_strategy: Missing-value strategy: "drop" or "mean".
        remove_duplicates_flag: Whether to drop duplicate rows.
        normalize_data: Whether to normalize numeric columns.
        normalize_method: Method name passed to DataNormalizer.fit_transform.
        validate_quality: Whether to compute quality metrics first.
        export_format: Format label echoed in the report.

    Returns:
        A textual processing report, or an error message.
    """
    try:
        df = read_data_file(file_path)

        original_shape = df.shape
        processing_steps = []

        # 1. Data-quality scoring (informational only; data is not modified).
        if validate_quality:
            validator = DataValidator()
            quality_metrics = validator.calculate_quality_metrics(df)
            processing_steps.append(f"质量评分: {quality_metrics['quality_score']:.1f}/100")

        # 2. Missing-value handling.
        if clean_missing:
            missing_before = df.isnull().sum().sum()
            if missing_strategy == "drop":
                df = df.dropna()
            elif missing_strategy == "mean":
                numeric_cols = df.select_dtypes(include=[np.number]).columns
                for col in numeric_cols:
                    # Bug fix: assign back instead of fillna(inplace=True) on a
                    # column selection, which is unreliable under pandas
                    # copy-on-write semantics.
                    df[col] = df[col].fillna(df[col].mean())
            missing_after = df.isnull().sum().sum()
            processing_steps.append(f"缺失值处理: {missing_before} → {missing_after}")

        # 3. Duplicate removal. duplicated().sum() counts exactly the rows
        # that drop_duplicates() will remove.
        if remove_duplicates_flag:
            duplicates_removed = df.duplicated().sum()
            df = df.drop_duplicates()
            processing_steps.append(f"去除重复值: {duplicates_removed} 行")

        # 4. Normalize numeric columns.
        if normalize_data:
            numeric_columns = df.select_dtypes(include=[np.number]).columns.tolist()
            if numeric_columns:
                normalizer = DataNormalizer()
                normalized_data, _ = normalizer.fit_transform(df[numeric_columns], normalize_method)
                df[numeric_columns] = normalized_data
                processing_steps.append(f"数据标准化: {normalize_method} 方法")

        # 5. Export: ensure the destination directory exists, then write.
        output_dir = os.path.dirname(output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        success = save_data_file(df, output_path)

        if not success:
            return "综合数据处理失败"

        file_size = os.path.getsize(output_path) / 1024 / 1024  # MB

        result = f"""综合数据处理完成:
- 原始数据形状: {original_shape}
- 处理后数据形状: {df.shape}
- 处理步骤: {len(processing_steps)} 个
"""

        for step in processing_steps:
            result += f"  • {step}\n"

        result += f"""- 输出文件: {output_path}
- 导出格式: {export_format.upper()}
- 文件大小: {file_size:.2f} MB"""

        return result

    except Exception as e:
        return f"综合数据处理失败: {str(e)}"

# ==================== 资源定义 ====================

@mcp.resource("data://{file_path}")
def get_data_resource(file_path: str) -> str:
    """Return a textual preview of a data file.

    Args:
        file_path: Path of the file, taken from the resource URI.
        
    Returns:
        Shape, memory usage, missing-value total, dtypes, the first 5 rows,
        and describe() statistics when numeric columns exist — or an error
        message string on failure.
    """
    try:
        df = read_data_file(file_path)
        
        preview = f"""数据文件预览: {file_path}

基本信息:
- 数据形状: {df.shape[0]} 行 × {df.shape[1]} 列
- 内存使用: {df.memory_usage(deep=True).sum() / 1024 / 1024:.2f} MB
- 缺失值总数: {df.isnull().sum().sum()}

列信息:
{df.dtypes.to_string()}

数据预览 (前5行):
{df.head().to_string()}

数值列统计:
{df.describe().to_string() if len(df.select_dtypes(include=[np.number]).columns) > 0 else '无数值列'}"""
        
        return preview
        
    except Exception as e:
        return f"获取数据预览失败: {str(e)}"

@mcp.tool()
def process_spss_file(
    file_path: str,
    output_path: Optional[str] = None,
    process_type: str = "analyze",
    clean_missing: bool = True,
    missing_strategy: str = "mean",
    normalize_method: Optional[str] = None,
    visualize: bool = True,
    output_format: str = "csv"
) -> str:
    """Process an SPSS (.sav) file.

    Optionally cleans missing values, normalizes numeric variables,
    runs descriptive/correlation analysis, renders charts and exports
    the result.

    Args:
        file_path: Input file path.
        output_path: Output file path (no export when None).
        process_type: Processing type (analyze, transform, clean).
        clean_missing: Whether to handle missing values.
        missing_strategy: Missing-value strategy (mean, median, mode, drop).
        missing_strategy values other than these leave the data untouched.
        normalize_method: Normalization method (None, zscore, minmax, robust).
        visualize: Whether to generate charts.
        output_format: Output format (csv, xlsx, sav).

    Returns:
        A human-readable report of every processing step, or an error
        message on failure.
    """
    try:
        # pyreadstat is an optional dependency resolved at import time.
        if pyreadstat is None:
            return "错误: 需要安装pyreadstat库来处理SPSS文件"

        # read_sav also returns SPSS metadata (labels etc.); kept for future use.
        df, meta = pyreadstat.read_sav(file_path)
        results = []
        results.append(f"成功读取SPSS文件: {file_path}")
        results.append(f"- 数据形状: {df.shape}")
        results.append(f"- 变量数量: {len(df.columns)}")

        # 1. Missing-value handling
        if clean_missing:
            missing_counts = df.isnull().sum()
            total_missing = missing_counts.sum()
            results.append("\n缺失值统计:")
            results.append(f"- 总缺失值: {total_missing}")

            if total_missing > 0:
                results.append("- 各变量缺失值:")
                for col, count in missing_counts[missing_counts > 0].items():
                    results.append(f"  * {col}: {count}")

                # numeric_only=True: SPSS frames often carry string columns,
                # and df.mean()/df.median() on them raises TypeError (pandas >= 2.0).
                if missing_strategy == "mean":
                    df = df.fillna(df.mean(numeric_only=True))
                elif missing_strategy == "median":
                    df = df.fillna(df.median(numeric_only=True))
                elif missing_strategy == "mode":
                    modes = df.mode()
                    # Guard: mode() on an all-NaN/empty frame yields no rows.
                    if not modes.empty:
                        df = df.fillna(modes.iloc[0])
                elif missing_strategy == "drop":
                    df = df.dropna()
                results.append(f"- 使用{missing_strategy}策略处理缺失值")

        # 2. Normalization of numeric variables
        if normalize_method:
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 0:
                # Explicit dispatch instead of an if/elif chain that left
                # `scaler` unbound (NameError) for unknown methods.
                scaler_factories = {
                    "zscore": StandardScaler,
                    "minmax": MinMaxScaler,
                    "robust": RobustScaler,
                }
                factory = scaler_factories.get(normalize_method)
                if factory is None:
                    return f"处理失败: 不支持的标准化方法: {normalize_method}"

                scaler = factory()
                df[numeric_cols] = scaler.fit_transform(df[numeric_cols])
                results.append(f"\n使用{normalize_method}方法标准化数值变量")

        # 3. Descriptive analysis
        if process_type == "analyze":
            results.append("\n描述性统计:")
            desc_stats = df.describe()
            results.append(str(desc_stats))

            # Correlation matrix only makes sense with 2+ numeric columns.
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 1:
                corr = df[numeric_cols].corr()
                results.append("\n变量相关性:")
                results.append(str(corr))

        # 4. Visualization
        if visualize:
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 0:
                # Charts go next to the output file (or the input when no output).
                viz_dir = os.path.join(os.path.dirname(output_path or file_path), "visualizations")
                os.makedirs(viz_dir, exist_ok=True)

                # Histogram per numeric variable; dropna() because plt.hist
                # errors on NaN input in some matplotlib versions.
                for col in numeric_cols:
                    plt.figure(figsize=(10, 6))
                    plt.hist(df[col].dropna(), bins=30)
                    plt.title(f"{col}分布图")
                    plt.xlabel(col)
                    plt.ylabel("频数")
                    plt.savefig(os.path.join(viz_dir, f"{col}_hist.png"))
                    plt.close()

                # Correlation heatmap
                if len(numeric_cols) > 1:
                    plt.figure(figsize=(12, 8))
                    sns.heatmap(df[numeric_cols].corr(), annot=True, cmap="coolwarm")
                    plt.title("相关性热图")
                    plt.tight_layout()
                    plt.savefig(os.path.join(viz_dir, "correlation_heatmap.png"))
                    plt.close()

                results.append(f"\n可视化图表已保存至: {viz_dir}")

        # 5. Export
        if output_path:
            if output_format == "csv":
                df.to_csv(output_path, index=False, encoding="utf-8")
            elif output_format == "xlsx":
                df.to_excel(output_path, index=False)
            elif output_format == "sav":
                pyreadstat.write_sav(df, output_path)
            results.append(f"\n处理后的数据已保存: {output_path}")

        return "\n".join(results)

    except Exception as e:
        return f"处理失败: {str(e)}"

# Script entry point: start the MCP server (blocks until shutdown).
if __name__ == "__main__":
    mcp.run()