# -*- coding: utf-8 -*-
"""
MCP Survey Analyzer Server - 升级版

基于MCP协议的问卷调查数据分析服务
支持Excel和CSV格式数据，提供数据清洗、统计分析、聚类挖掘和可视化等功能
"""

import asyncio
import json
import base64
from pathlib import Path
from typing import Any, Dict, List, Optional
from datetime import datetime

import pandas as pd
import numpy as np
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import Resource, Tool, TextContent, ImageContent, EmbeddedResource
from pydantic import AnyUrl

# 导入升级后的模块
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from core.config import Config
from utils.logger import get_logger, LoggerMixin
from utils.validators import DataValidator
from data.processor import DataProcessor
from analysis.statistics import StatisticalAnalyzer
from analysis.clustering import ClusterAnalyzer
from visualization.charts import ChartGenerator


class SurveyAnalyzer(LoggerMixin):
    """Survey data analyzer (upgraded version).

    Wires together configuration, data processing, statistics, clustering
    and chart generation components, and holds the mutable session state:
    the raw loaded dataset, the cleaned dataset, and a dict of accumulated
    analysis results keyed by analysis kind.
    """

    def __init__(self, config_path: Optional[str] = None):
        super().__init__()

        # Load configuration (component defaults apply when config_path is None).
        self.config = Config(config_path)

        # All analysis components share the same configuration object.
        self.data_processor = DataProcessor(self.config)
        self.statistical_analyzer = StatisticalAnalyzer(self.config)
        self.cluster_analyzer = ClusterAnalyzer(self.config)
        self.chart_generator = ChartGenerator(self.config)
        self.validator = DataValidator()

        # Session state: raw data, cleaned data, and per-analysis results.
        self.current_data: Optional[pd.DataFrame] = None
        self.cleaned_data: Optional[pd.DataFrame] = None
        self.analysis_results: Dict[str, Any] = {}

        # Ensure the output directory exists up front.
        self.output_dir = Path(self.config.get('output.base_directory', 'output'))
        self.output_dir.mkdir(parents=True, exist_ok=True)

        self.log_info("SurveyAnalyzer 升级版初始化完成")

    def _active_data(self) -> Optional[pd.DataFrame]:
        """Return the dataset analyses should operate on.

        Prefers the cleaned dataset when available, falling back to the raw
        loaded dataset; returns None when nothing has been loaded yet.
        """
        return self.cleaned_data if self.cleaned_data is not None else self.current_data

    def load_survey_data(self, file_path: str, encoding: str = 'utf-8', **kwargs) -> Dict[str, Any]:
        """Load survey data from a file.

        Args:
            file_path: Path to the data file.
            encoding: File encoding.
            **kwargs: Extra loading parameters forwarded to the processor.

        Returns:
            Load result dict; on success contains file/data info and
            validation details but NOT the data itself (to avoid shipping
            large payloads back over the protocol).
        """
        try:
            result = self.data_processor.load_data(file_path, encoding, **kwargs)

            if result.get('success'):
                self.current_data = result['data']
                self.log_info(f"数据加载成功: {result['data_info']['shape']}")

                # Return metadata only — the DataFrame stays server-side.
                return {
                    "success": True,
                    "message": "数据加载成功",
                    "file_info": result['file_info'],
                    "data_info": result['data_info'],
                    "validation": result['validation']
                }
            else:
                return result

        except Exception as e:
            self.log_error(f"加载数据失败: {str(e)}")
            return {"error": f"加载数据失败: {str(e)}"}

    def clean_survey_data(self, cleaning_options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Clean the currently loaded survey data.

        Args:
            cleaning_options: Cleaning options passed through to the processor.

        Returns:
            Cleaning result dict including before/after summaries and the
            cleaning log.
            NOTE(review): unlike load_survey_data, this also returns the full
            'cleaned_data' DataFrame — confirm callers rely on it before
            trimming the payload.
        """
        try:
            if self.current_data is None:
                return {"error": "请先加载数据"}

            result = self.data_processor.clean_data(self.current_data, cleaning_options)

            if result.get('success'):
                self.cleaned_data = result['cleaned_data']
                self.log_info("数据清洗完成")

                return {
                    "success": True,
                    "message": "数据清洗完成",
                    "cleaned_data": result['cleaned_data'],
                    "cleaning_log": result['cleaning_log'],
                    "before_cleaning": result['before_cleaning'],
                    "after_cleaning": result['after_cleaning'],
                    "validation": result['validation']
                }
            else:
                return result

        except Exception as e:
            self.log_error(f"数据清洗失败: {str(e)}")
            return {"error": f"数据清洗失败: {str(e)}"}

    def analyze_survey_statistics(self, features: Optional[List[str]] = None,
                                analysis_type: str = 'descriptive') -> Dict[str, Any]:
        """Run a statistical analysis on the active dataset.

        Args:
            features: Feature names to analyze (None means all applicable).
            analysis_type: One of 'descriptive', 'correlation', 'normality'.

        Returns:
            Analysis result dict from the statistical analyzer, or an error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            if analysis_type == 'descriptive':
                result = self.statistical_analyzer.descriptive_statistics(data, features)
            elif analysis_type == 'correlation':
                result = self.statistical_analyzer.correlation_analysis(data, features)
            elif analysis_type == 'normality':
                result = self.statistical_analyzer.normality_testing(data, features)
            else:
                return {"error": f"不支持的分析类型: {analysis_type}"}

            if result.get('success'):
                # Cache under a per-type key so different analyses coexist.
                self.analysis_results[f'statistics_{analysis_type}'] = result
                self.log_info(f"统计分析完成: {analysis_type}")

            return result

        except Exception as e:
            self.log_error(f"统计分析失败: {str(e)}")
            return {"error": f"统计分析失败: {str(e)}"}

    def perform_cluster_analysis(self, features: Optional[List[str]] = None,
                               algorithm: str = 'kmeans',
                               n_clusters: int = 3,
                               **kwargs) -> Dict[str, Any]:
        """Run cluster analysis on the active dataset.

        Args:
            features: Feature names to cluster on (None means all applicable).
            algorithm: Clustering algorithm ('kmeans', 'hierarchical', 'dbscan').
            n_clusters: Number of clusters.
            **kwargs: Algorithm-specific parameters.

        Returns:
            Clustering result dict, or an error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            result = self.cluster_analyzer.perform_clustering(
                data, features, algorithm, n_clusters, **kwargs
            )

            if result.get('success'):
                self.analysis_results['clustering'] = result
                self.log_info(f"聚类分析完成: {algorithm}")

            return result

        except Exception as e:
            self.log_error(f"聚类分析失败: {str(e)}")
            return {"error": f"聚类分析失败: {str(e)}"}

    def find_optimal_clusters(self, features: Optional[List[str]] = None,
                            max_clusters: int = 10,
                            algorithm: str = 'kmeans') -> Dict[str, Any]:
        """Search for the optimal number of clusters.

        Args:
            features: Feature names to cluster on.
            max_clusters: Upper bound on the number of clusters to try.
            algorithm: Clustering algorithm.

        Returns:
            Optimal-cluster-count analysis result dict, or an error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            result = self.cluster_analyzer.find_optimal_clusters(
                data, features, max_clusters, algorithm
            )

            if result.get('success'):
                self.analysis_results['optimal_clusters'] = result
                self.log_info("最优聚类数量分析完成")

            return result

        except Exception as e:
            self.log_error(f"最优聚类数量分析失败: {str(e)}")
            return {"error": f"最优聚类数量分析失败: {str(e)}"}

    def create_visualization(self, chart_type: str,
                           features: Optional[List[str]] = None,
                           **kwargs) -> Dict[str, Any]:
        """Create a visualization chart for the active dataset.

        Args:
            chart_type: Chart type ('correlation_heatmap', 'distribution_plots',
                'categorical_plots', 'cluster_scatter', 'dashboard').
            features: Feature names to visualize (optional).
            **kwargs: Chart-specific parameters; 'cluster_scatter' requires
                cluster_labels, x_feature and y_feature.

        Returns:
            Visualization result dict (may include base64 image data), or an
            error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            if chart_type == 'correlation_heatmap':
                result = self.chart_generator.create_correlation_heatmap(data, features)
            elif chart_type == 'distribution_plots':
                result = self.chart_generator.create_distribution_plots(data, features)
            elif chart_type == 'categorical_plots':
                result = self.chart_generator.create_categorical_plots(data, features)
            elif chart_type == 'cluster_scatter':
                cluster_labels = kwargs.get('cluster_labels')
                x_feature = kwargs.get('x_feature')
                y_feature = kwargs.get('y_feature')
                # Explicit None check: plain truthiness would reject an empty
                # label list and raise on a numpy array of labels.
                if cluster_labels is not None and x_feature and y_feature:
                    result = self.chart_generator.create_cluster_scatter(
                        data, cluster_labels, x_feature, y_feature
                    )
                else:
                    return {"error": "聚类散点图需要cluster_labels, x_feature, y_feature参数"}
            elif chart_type == 'dashboard':
                cluster_labels = kwargs.get('cluster_labels')
                result = self.chart_generator.create_comprehensive_dashboard(data, cluster_labels)
            else:
                return {"error": f"不支持的图表类型: {chart_type}"}

            if result.get('success'):
                self.analysis_results[f'visualization_{chart_type}'] = result
                self.log_info(f"可视化创建完成: {chart_type}")

            return result

        except Exception as e:
            self.log_error(f"创建可视化失败: {str(e)}")
            return {"error": f"创建可视化失败: {str(e)}"}

    def hypothesis_testing(self, test_type: str, **kwargs) -> Dict[str, Any]:
        """Run a hypothesis test on the active dataset.

        Args:
            test_type: Test type identifier.
            **kwargs: Test-specific parameters.

        Returns:
            Hypothesis test result dict, or an error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            result = self.statistical_analyzer.hypothesis_testing(data, test_type, **kwargs)

            if result.get('success'):
                self.analysis_results[f'hypothesis_{test_type}'] = result
                self.log_info(f"假设检验完成: {test_type}")

            return result

        except Exception as e:
            self.log_error(f"假设检验失败: {str(e)}")
            return {"error": f"假设检验失败: {str(e)}"}

    def generate_user_profile(self) -> Dict[str, Any]:
        """Generate a user profile from the active dataset.

        Combines descriptive statistics, any cached clustering result and
        derived insights into a single profile dict.

        Returns:
            Profile dict, or an error dict.
        """
        try:
            data = self._active_data()
            if data is None:
                return {"error": "请先加载数据"}

            # Basic descriptive statistics over all features.
            basic_stats = self.statistical_analyzer.descriptive_statistics(data)

            # Clustering info, if a clustering run has been cached.
            cluster_info = self.analysis_results.get('clustering', {})

            profile = {
                "success": True,
                "data_overview": {
                    "total_samples": len(data),
                    "total_features": len(data.columns),
                    "numeric_features": len(data.select_dtypes(include=[np.number]).columns),
                    "categorical_features": len(data.select_dtypes(include=['object', 'category']).columns),
                    "missing_values": int(data.isnull().sum().sum()),
                    "duplicate_rows": int(data.duplicated().sum())
                },
                "statistical_summary": basic_stats.get('numeric_statistics', {}),
                "categorical_summary": basic_stats.get('categorical_statistics', {}),
                "cluster_profiles": cluster_info.get('cluster_profiles', {}),
                "key_insights": self._generate_insights(data, basic_stats, cluster_info)
            }

            self.analysis_results['user_profile'] = profile
            self.log_info("用户画像生成完成")

            return profile

        except Exception as e:
            self.log_error(f"生成用户画像失败: {str(e)}")
            return {"error": f"生成用户画像失败: {str(e)}"}

    def _generate_insights(self, data: pd.DataFrame,
                         basic_stats: Dict[str, Any],
                         cluster_info: Dict[str, Any]) -> List[str]:
        """Derive human-readable insight strings from the data and prior results."""
        insights = []

        # Data-quality insight; guard against an empty frame (zero cells).
        total_cells = len(data) * len(data.columns)
        missing_ratio = data.isnull().sum().sum() / total_cells if total_cells else 0.0
        if missing_ratio > 0.1:
            insights.append(f"数据缺失率较高 ({missing_ratio:.1%})，建议进行数据清洗")
        elif missing_ratio < 0.01:
            insights.append("数据质量良好，缺失值很少")

        # Sample-size insight.
        if len(data) < 100:
            insights.append("样本量较小，统计结果可能不够稳定")
        elif len(data) > 10000:
            insights.append("大样本数据，统计结果具有较高可信度")

        # Feature-variability insight via coefficient of variation.
        numeric_cols = data.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            cv_values = []
            for col in numeric_cols:
                # Skip constant columns and zero means (CV undefined).
                if data[col].std() > 0 and data[col].mean() != 0:
                    cv = data[col].std() / abs(data[col].mean())
                    cv_values.append(cv)

            if cv_values:
                avg_cv = np.mean(cv_values)
                if avg_cv > 1:
                    insights.append("数值特征变异性较大，建议考虑数据标准化")
                elif avg_cv < 0.1:
                    insights.append("数值特征变异性较小，可能存在低方差特征")

        # Clustering insight.
        if cluster_info.get('success'):
            n_clusters = cluster_info.get('n_clusters', 0)
            if n_clusters > 1:
                insights.append(f"数据可以分为 {n_clusters} 个不同的群体")

                # Quality check; explicit None check so a silhouette score of
                # exactly 0.0 (falsy) still triggers the low-quality message.
                silhouette = cluster_info.get('evaluation_metrics', {}).get('silhouette_score')
                if silhouette is not None and silhouette > 0.5:
                    insights.append("聚类效果良好，群体区分明显")
                elif silhouette is not None and silhouette < 0.3:
                    insights.append("聚类效果一般，群体边界不够清晰")

        return insights

    def save_analysis_results(self, include_data: bool = False) -> Dict[str, Any]:
        """Save all analysis results (and optionally the data) to disk.

        Args:
            include_data: Whether to also save the raw and cleaned datasets
                as CSV files.

        Returns:
            Save result dict with the target directory and file list, or an
            error dict.
        """
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            save_dir = self.output_dir / f"analysis_{timestamp}"
            save_dir.mkdir(parents=True, exist_ok=True)

            saved_files = []

            # Persist accumulated analysis results as JSON.
            if self.analysis_results:
                results_file = save_dir / "analysis_results.json"
                with open(results_file, 'w', encoding='utf-8') as f:
                    # Strip objects that json cannot serialize.
                    serializable_results = self._make_serializable(self.analysis_results)
                    json.dump(serializable_results, f, ensure_ascii=False, indent=2)
                saved_files.append(str(results_file))

            # Optionally persist the datasets (utf-8-sig keeps Excel happy).
            if include_data:
                if self.current_data is not None:
                    original_file = save_dir / "original_data.csv"
                    self.current_data.to_csv(original_file, index=False, encoding='utf-8-sig')
                    saved_files.append(str(original_file))

                if self.cleaned_data is not None:
                    cleaned_file = save_dir / "cleaned_data.csv"
                    self.cleaned_data.to_csv(cleaned_file, index=False, encoding='utf-8-sig')
                    saved_files.append(str(cleaned_file))

            # Always emit the markdown summary report.
            report_file = save_dir / "analysis_report.md"
            self._generate_report(report_file)
            saved_files.append(str(report_file))

            result = {
                "success": True,
                "save_directory": str(save_dir),
                "saved_files": saved_files,
                "timestamp": timestamp
            }

            self.log_info(f"分析结果保存完成: {save_dir}")
            return result

        except Exception as e:
            self.log_error(f"保存分析结果失败: {str(e)}")
            return {"error": f"保存分析结果失败: {str(e)}"}

    def _make_serializable(self, obj: Any) -> Any:
        """Recursively convert an object into JSON-serializable builtins."""
        if isinstance(obj, dict):
            return {k: self._make_serializable(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._make_serializable(item) for item in obj]
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, np.integer):
            # Keep integer-ness: float(obj) would turn counts like 3 into 3.0.
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, (pd.Series, pd.Index)):
            return obj.tolist()
        elif isinstance(obj, pd.DataFrame):
            return obj.to_dict()
        elif hasattr(obj, '__dict__'):
            # Arbitrary objects fall back to their string representation.
            return str(obj)
        else:
            return obj

    def _generate_report(self, report_file: Path) -> None:
        """Write a markdown summary report of the session to report_file."""
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write("# 问卷调查数据分析报告\n\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

            # Data overview section (only when data has been loaded).
            if self.current_data is not None:
                f.write("## 数据概览\n\n")
                f.write(f"- 样本数量: {len(self.current_data)}\n")
                f.write(f"- 特征数量: {len(self.current_data.columns)}\n")
                f.write(f"- 缺失值: {self.current_data.isnull().sum().sum()}\n")
                f.write(f"- 重复行: {self.current_data.duplicated().sum()}\n\n")

            # Per-analysis success/failure summary.
            f.write("## 分析结果摘要\n\n")
            for analysis_type, result in self.analysis_results.items():
                f.write(f"### {analysis_type}\n\n")
                if result.get('success'):
                    f.write("✅ 分析完成\n\n")
                else:
                    f.write(f"❌ 分析失败: {result.get('error', '未知错误')}\n\n")

            f.write("---\n\n")
            f.write("*本报告由 SurveyAnalyzer 升级版自动生成*\n")

    def get_analysis_summary(self) -> Dict[str, Any]:
        """Summarize the current session state.

        Returns:
            Summary dict covering load/clean status, completed analyses and,
            when data is present, its shape and quality counters.
        """
        try:
            data = self._active_data()

            summary = {
                "success": True,
                "data_loaded": data is not None,
                "data_cleaned": self.cleaned_data is not None,
                "analysis_completed": list(self.analysis_results.keys()),
                "total_analyses": len(self.analysis_results)
            }

            if data is not None:
                summary.update({
                    "data_shape": list(data.shape),
                    "numeric_features": len(data.select_dtypes(include=[np.number]).columns),
                    "categorical_features": len(data.select_dtypes(include=['object', 'category']).columns),
                    "missing_values": int(data.isnull().sum().sum())
                })

            return summary

        except Exception as e:
            self.log_error(f"获取分析摘要失败: {str(e)}")
            return {"error": f"获取分析摘要失败: {str(e)}"}

    def set_custom_output_directory(self, directory: str) -> Dict[str, Any]:
        """Set a custom output directory.

        Args:
            directory: Output directory path (created if missing).

        Returns:
            Result dict with the new directory, or an error dict.
        """
        try:
            new_dir = Path(directory)
            new_dir.mkdir(parents=True, exist_ok=True)

            self.output_dir = new_dir
            self.config.set('output.base_directory', str(new_dir))

            # Keep dependent components pointed at the same directory.
            self.chart_generator.output_dir = new_dir

            result = {
                "success": True,
                "output_directory": str(new_dir),
                "message": "输出目录设置成功"
            }

            self.log_info(f"输出目录设置为: {new_dir}")
            return result

        except Exception as e:
            self.log_error(f"设置输出目录失败: {str(e)}")
            return {"error": f"设置输出目录失败: {str(e)}"}

    def get_saved_files_info(self) -> Dict[str, Any]:
        """List files saved under the output directory.

        Returns:
            Result dict with file metadata sorted by modification time
            (newest first), or an error dict.
        """
        try:
            files_info = []

            if self.output_dir.exists():
                for file_path in self.output_dir.rglob('*'):
                    if file_path.is_file():
                        files_info.append({
                            "path": str(file_path),
                            "name": file_path.name,
                            "size_mb": file_path.stat().st_size / 1024 / 1024,
                            "modified": datetime.fromtimestamp(file_path.stat().st_mtime).isoformat()
                        })

            return {
                "success": True,
                "output_directory": str(self.output_dir),
                "total_files": len(files_info),
                "files": sorted(files_info, key=lambda x: x['modified'], reverse=True)
            }

        except Exception as e:
            self.log_error(f"获取文件信息失败: {str(e)}")
            return {"error": f"获取文件信息失败: {str(e)}"}


# Global analyzer instance shared by all MCP handlers below.
analyzer = SurveyAnalyzer()

# MCP server exposing the analyzer's features as resources and tools.
app = Server("survey-analyzer")


@app.list_resources()
async def handle_list_resources() -> list[Resource]:
    """Advertise the current dataset and analysis results as MCP resources."""
    current_dataset = Resource(
        uri=AnyUrl("survey://data/current"),
        name="当前数据集",
        description="当前加载的问卷调查数据",
        mimeType="application/json"
    )
    analysis_results = Resource(
        uri=AnyUrl("survey://analysis/results"),
        name="分析结果",
        description="所有分析结果的汇总",
        mimeType="application/json"
    )
    return [current_dataset, analysis_results]


@app.read_resource()
async def handle_read_resource(uri: AnyUrl) -> str:
    """Serialize the requested resource to a JSON string."""
    resource_key = str(uri)

    if resource_key == "survey://data/current":
        # No data loaded yet -> structured error payload.
        if analyzer.current_data is None:
            return json.dumps({"error": "没有加载数据"}, ensure_ascii=False)
        return analyzer.current_data.to_json(orient='records', force_ascii=False)

    if resource_key == "survey://analysis/results":
        return json.dumps(analyzer.analysis_results, ensure_ascii=False, default=str)

    raise ValueError(f"未知资源: {uri}")


@app.list_tools()
async def handle_list_tools() -> list[Tool]:
    """Declare the MCP tools exposed by this server.

    Each Tool mirrors one SurveyAnalyzer method; the JSON schemas below
    describe the arguments handle_call_tool forwards to those methods.
    """
    return [
        Tool(
            name="load_survey_data",
            description="加载问卷调查数据文件（支持CSV、Excel格式）",
            inputSchema={
                "type": "object",
                "properties": {
                    "file_path": {"type": "string", "description": "数据文件路径"},
                    "encoding": {"type": "string", "description": "文件编码", "default": "utf-8"},
                    "sheet_name": {"type": "string", "description": "Excel工作表名称（可选）"}
                },
                "required": ["file_path"]
            }
        ),
        Tool(
            name="clean_survey_data",
            description="清洗问卷调查数据",
            inputSchema={
                "type": "object",
                "properties": {
                    "remove_duplicates": {"type": "boolean", "description": "是否删除重复行", "default": True},
                    "missing_value_strategy": {"type": "string", "description": "缺失值处理策略", "enum": ["auto", "drop_rows", "drop_columns", "fill_mean", "fill_median", "knn_impute", "none"], "default": "auto"},
                    "outlier_strategy": {"type": "string", "description": "异常值处理策略", "enum": ["none", "iqr_remove", "iqr_cap", "zscore_remove"], "default": "none"},
                    "clean_column_names": {"type": "boolean", "description": "是否清理列名", "default": True},
                    "auto_convert_types": {"type": "boolean", "description": "是否自动转换数据类型", "default": True}
                }
            }
        ),
        Tool(
            name="analyze_survey_statistics",
            description="进行统计分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "features": {"type": "array", "items": {"type": "string"}, "description": "要分析的特征列表（可选）"},
                    "analysis_type": {"type": "string", "description": "分析类型", "enum": ["descriptive", "correlation", "normality"], "default": "descriptive"}
                }
            }
        ),
        Tool(
            name="perform_cluster_analysis",
            description="执行聚类分析",
            inputSchema={
                "type": "object",
                "properties": {
                    "features": {"type": "array", "items": {"type": "string"}, "description": "用于聚类的特征列表（可选）"},
                    "algorithm": {"type": "string", "description": "聚类算法", "enum": ["kmeans", "hierarchical", "dbscan", "spectral"], "default": "kmeans"},
                    "n_clusters": {"type": "integer", "description": "聚类数量", "default": 3},
                    "random_state": {"type": "integer", "description": "随机种子", "default": 42}
                }
            }
        ),
        Tool(
            name="find_optimal_clusters",
            description="寻找最优聚类数量",
            inputSchema={
                "type": "object",
                "properties": {
                    "features": {"type": "array", "items": {"type": "string"}, "description": "用于聚类的特征列表（可选）"},
                    "max_clusters": {"type": "integer", "description": "最大聚类数量", "default": 10},
                    "algorithm": {"type": "string", "description": "聚类算法", "enum": ["kmeans", "hierarchical", "spectral"], "default": "kmeans"}
                }
            }
        ),
        Tool(
            name="create_visualization",
            description="创建数据可视化图表",
            inputSchema={
                "type": "object",
                "properties": {
                    "chart_type": {"type": "string", "description": "图表类型", "enum": ["correlation_heatmap", "distribution_plots", "categorical_plots", "cluster_scatter", "dashboard"]},
                    "features": {"type": "array", "items": {"type": "string"}, "description": "要可视化的特征列表（可选）"},
                    "cluster_labels": {"type": "array", "items": {"type": "integer"}, "description": "聚类标签（用于聚类散点图）"},
                    "x_feature": {"type": "string", "description": "X轴特征（用于散点图）"},
                    "y_feature": {"type": "string", "description": "Y轴特征（用于散点图）"},

                },
                "required": ["chart_type"]
            }
        ),
        Tool(
            name="hypothesis_testing",
            description="进行假设检验",
            inputSchema={
                "type": "object",
                "properties": {
                    "test_type": {"type": "string", "description": "检验类型", "enum": ["ttest_1samp", "ttest_ind", "chi2", "anova"]},
                    "feature": {"type": "string", "description": "要检验的特征"},
                    "group_column": {"type": "string", "description": "分组列"},
                    "population_mean": {"type": "number", "description": "总体均值（用于单样本t检验）"},
                    "feature1": {"type": "string", "description": "第一个特征（用于卡方检验）"},
                    "feature2": {"type": "string", "description": "第二个特征（用于卡方检验）"}
                },
                "required": ["test_type"]
            }
        ),
        Tool(
            name="generate_user_profile",
            description="生成用户群体画像",
            inputSchema={"type": "object", "properties": {}}
        ),
        Tool(
            name="save_analysis_results",
            description="保存分析结果到本地",
            inputSchema={
                "type": "object",
                "properties": {
                    "include_data": {"type": "boolean", "description": "是否包含原始数据", "default": False}
                }
            }
        ),
        Tool(
            name="get_analysis_summary",
            description="获取分析摘要",
            inputSchema={"type": "object", "properties": {}}
        ),
        Tool(
            name="set_custom_output_directory",
            description="设置自定义输出目录",
            inputSchema={
                "type": "object",
                "properties": {
                    "directory": {"type": "string", "description": "输出目录路径"}
                },
                "required": ["directory"]
            }
        ),
        Tool(
            name="get_saved_files_info",
            description="获取已保存文件信息",
            inputSchema={"type": "object", "properties": {}}
        )
    ]


@app.call_tool()
async def handle_call_tool(name: str, arguments: dict) -> list[TextContent | ImageContent | EmbeddedResource]:
    """Dispatch a tool invocation to the matching analyzer method."""
    try:
        # Tool name -> callable taking the raw arguments dict. Note that
        # clean_survey_data receives the dict positionally (as its options
        # argument), while most tools expand it into keyword arguments.
        dispatch = {
            "load_survey_data": lambda args: analyzer.load_survey_data(**args),
            "clean_survey_data": lambda args: analyzer.clean_survey_data(args),
            "analyze_survey_statistics": lambda args: analyzer.analyze_survey_statistics(**args),
            "perform_cluster_analysis": lambda args: analyzer.perform_cluster_analysis(**args),
            "find_optimal_clusters": lambda args: analyzer.find_optimal_clusters(**args),
            "create_visualization": lambda args: analyzer.create_visualization(**args),
            "hypothesis_testing": lambda args: analyzer.hypothesis_testing(**args),
            "generate_user_profile": lambda args: analyzer.generate_user_profile(),
            "save_analysis_results": lambda args: analyzer.save_analysis_results(**args),
            "get_analysis_summary": lambda args: analyzer.get_analysis_summary(),
            "set_custom_output_directory": lambda args: analyzer.set_custom_output_directory(**args),
            "get_saved_files_info": lambda args: analyzer.get_saved_files_info(),
        }

        handler = dispatch.get(name)
        if handler is None:
            result = {"error": f"未知工具: {name}"}
        else:
            result = handler(arguments)

        # Results that carry an image are split into a text part (everything
        # except the image payload) plus an image part.
        if isinstance(result, dict) and "image_base64" in result:
            text_payload = {k: v for k, v in result.items() if k != "image_base64"}
            return [
                TextContent(
                    type="text",
                    text=json.dumps(text_payload,
                                  ensure_ascii=False, indent=2, default=str)
                ),
                ImageContent(
                    type="image",
                    data=result["image_base64"],
                    mimeType="image/png"
                )
            ]

        return [
            TextContent(
                type="text",
                text=json.dumps(result, ensure_ascii=False, indent=2, default=str)
            )
        ]

    except Exception as e:
        # Top-level protocol boundary: report the failure as a payload
        # instead of letting the exception escape the server loop.
        error_result = {"error": f"工具执行失败: {str(e)}"}
        return [
            TextContent(
                type="text",
                text=json.dumps(error_result, ensure_ascii=False, indent=2)
            )
        ]


if __name__ == "__main__":
    # 启动Flask Web服务器
    from web.app import create_app
    web_app = create_app()
    web_app.run(host='127.0.0.1', port=5000, debug=True)