#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCP Survey Analyzer Server

这是一个基于MCP协议的问卷调查数据分析服务，提供数据清洗、统计分析、聚类挖掘和可视化等功能。
支持Excel和CSV格式的问卷数据，能够自动生成用户群体画像，并结合自然语言解释提升数据分析能力。
"""

from mcp.server.fastmcp import FastMCP
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import json
import os
from typing import Dict, List, Optional, Union, Any
import warnings
from datetime import datetime
import base64
from io import BytesIO
import shutil


# Configure matplotlib for Chinese (CJK) text rendering.
plt.rcParams['font.sans-serif'] = ['SimHei']  # font family that contains CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font

# Best-effort font-cache rebuild so the font settings take effect immediately.
# NOTE: font_manager._rebuild() is a private API that was removed in
# matplotlib >= 3.6, so this raises AttributeError on modern versions and we
# deliberately swallow the failure (bare `except:` replaced with
# `except Exception` so KeyboardInterrupt/SystemExit are not swallowed).
try:
    import matplotlib.font_manager as fm
    fm._rebuild()
except Exception:
    pass

warnings.filterwarnings('ignore')

# Output directory layout: <module dir>/output/{charts,reports,data}.
# set_output_directory() can redirect everything at runtime.
DEFAULT_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), 'output')
OUTPUT_DIR = DEFAULT_OUTPUT_DIR
CHARTS_DIR = os.path.join(OUTPUT_DIR, 'charts')
REPORTS_DIR = os.path.join(OUTPUT_DIR, 'reports')
DATA_DIR = os.path.join(OUTPUT_DIR, 'data')

# Create the whole tree up front so later writes never hit a missing directory.
for _directory in (OUTPUT_DIR, CHARTS_DIR, REPORTS_DIR, DATA_DIR):
    os.makedirs(_directory, exist_ok=True)

def set_output_directory(custom_dir: str) -> bool:
    """Point all output (charts/reports/data) at *custom_dir*.

    Creates the directory tree if needed. Returns True on success, False when
    the directories cannot be created (e.g. permission denied). The module
    globals are only rebound after every directory has been created, so a
    failure cannot leave the paths in a half-updated state (the original
    updated the globals before creating the subdirectories).
    """
    global OUTPUT_DIR, CHARTS_DIR, REPORTS_DIR, DATA_DIR
    try:
        charts = os.path.join(custom_dir, 'charts')
        reports = os.path.join(custom_dir, 'reports')
        data = os.path.join(custom_dir, 'data')
        # makedirs(exist_ok=True) already covers the "exists" case, so the
        # former os.path.exists() pre-check was redundant (and racy).
        for dir_path in (custom_dir, charts, reports, data):
            os.makedirs(dir_path, exist_ok=True)

        OUTPUT_DIR, CHARTS_DIR, REPORTS_DIR, DATA_DIR = custom_dir, charts, reports, data
        return True
    except Exception:
        return False

# Create the MCP service instance; tools and resources are registered on it below.
mcp = FastMCP("Survey Analyzer Server")

class SurveyAnalyzer:
    """Survey questionnaire analyzer.

    Holds the loaded dataset and all derived results, and provides data
    cleaning, descriptive statistics, K-means clustering, chart rendering,
    user-profile generation and persistence of results to the module-level
    output directories (OUTPUT_DIR / CHARTS_DIR / REPORTS_DIR / DATA_DIR).
    """

    def __init__(self):
        self.data = None            # raw DataFrame as loaded by load_data()
        self.cleaned_data = None    # DataFrame produced by clean_data()
        self.analysis_results = {}  # output of statistical_analysis()
        self.cluster_results = {}   # output of cluster_analysis()
        self.saved_files = []       # paths of every file written to disk

    @staticmethod
    def _json_safe(obj: Any) -> Any:
        """Recursively convert numpy scalars/containers to plain Python types.

        Needed because json.dumps rejects np.int64 (np.float64 passes only
        because it subclasses float), so results built from value_counts(),
        isnull().sum() etc. would crash the MCP tool wrappers. NaN floats are
        mapped to None so the output is strict JSON.
        """
        if isinstance(obj, dict):
            return {str(k): SurveyAnalyzer._json_safe(v) for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return [SurveyAnalyzer._json_safe(v) for v in obj]
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, (float, np.floating)):
            value = float(obj)
            return None if np.isnan(value) else value
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, pd.Timestamp):
            return obj.isoformat()
        return obj

    def load_data(self, file_path: str) -> Dict[str, Any]:
        """Load a CSV (.csv) or Excel (.xlsx/.xls) file into self.data.

        Returns {"success": True, shape, columns, sample_data} on success,
        or {"error": ...} on unsupported format / read failure.
        """
        try:
            if file_path.endswith('.csv'):
                self.data = pd.read_csv(file_path, encoding='utf-8')
            elif file_path.endswith(('.xlsx', '.xls')):
                self.data = pd.read_excel(file_path)
            else:
                return {"error": "不支持的文件格式，请使用CSV或Excel文件"}

            return {
                "success": True,
                "shape": list(self.data.shape),
                "columns": list(self.data.columns),
                # _json_safe: head() rows may contain numpy scalars / NaN
                "sample_data": self._json_safe(self.data.head().to_dict('records')),
            }
        except Exception as e:
            return {"error": f"数据加载失败: {str(e)}"}

    def clean_data(self, remove_duplicates: bool = False) -> Dict[str, Any]:
        """Clean the loaded data into self.cleaned_data.

        Fills missing values, optionally drops duplicate rows, and clips
        numeric outliers to the 1.5*IQR fences.

        Args:
            remove_duplicates: drop duplicate rows when True (default False,
                duplicates are kept).

        Returns:
            {"success": True, "cleaning_info": ..., "cleaned_sample": ...}
            or {"error": ...}.
        """
        if self.data is None:
            return {"error": "请先加载数据"}

        try:
            self.cleaned_data = self.data.copy()

            cleaning_info: Dict[str, Any] = {
                "original_shape": list(self.data.shape),
                "missing_values": {},
                "duplicates_removed": 0,
                "outliers_handled": 0,
                "remove_duplicates_enabled": remove_duplicates,
            }

            # int() conversion: isnull().sum() yields np.int64, which
            # json.dumps cannot serialize.
            missing_counts = self.cleaned_data.isnull().sum()
            cleaning_info["missing_values"] = {
                col: int(cnt) for col, cnt in missing_counts[missing_counts > 0].items()
            }

            self._fill_missing_values()

            if remove_duplicates:
                rows_before = len(self.cleaned_data)
                self.cleaned_data.drop_duplicates(inplace=True)
                cleaning_info["duplicates_removed"] = int(rows_before - len(self.cleaned_data))
            else:
                cleaning_info["note"] = "重复数据已保留"

            cleaning_info["outliers_handled"] = self._clip_outliers()
            cleaning_info["final_shape"] = list(self.cleaned_data.shape)

            # Best-effort persistence of intermediate results.
            self._auto_save_results()

            return {
                "success": True,
                "cleaning_info": cleaning_info,
                "cleaned_sample": self._json_safe(self.cleaned_data.head().to_dict('records')),
                "auto_saved": True,
            }
        except Exception as e:
            return {"error": f"数据清洗失败: {str(e)}"}

    def _fill_missing_values(self) -> None:
        """Fill missing values in self.cleaned_data, column by column.

        Numeric (int64/float64) columns: median when the column contains
        1.5*IQR outliers (robust to skew), mean otherwise. Other columns:
        forward fill, then backward fill, then mode, then literal 'Unknown'.

        Uses `df[col] = df[col].fillna(...)` / `.ffill()` / `.bfill()` —
        the original `fillna(method=...)` and `inplace=True` on a column
        selection are deprecated (removed) in pandas 2.x.
        """
        df = self.cleaned_data
        for col in df.columns:
            if df[col].isnull().sum() == 0:
                continue

            if df[col].dtype in ('int64', 'float64'):
                q1 = df[col].quantile(0.25)
                q3 = df[col].quantile(0.75)
                iqr = q3 - q1
                lower = q1 - 1.5 * iqr
                upper = q3 + 1.5 * iqr
                has_outliers = ((df[col] < lower) | (df[col] > upper)).any()
                # Median is more robust than mean in the presence of outliers.
                fill_value = df[col].median() if has_outliers else df[col].mean()
                if pd.isna(fill_value):
                    # All-NaN column: mean/median are NaN; fall back to 0 so
                    # the column actually ends up filled.
                    fill_value = 0
                df[col] = df[col].fillna(fill_value)
            else:
                df[col] = df[col].ffill().bfill()
                if df[col].isnull().any():
                    mode_values = df[col].mode()
                    fallback = mode_values.iloc[0] if len(mode_values) > 0 else 'Unknown'
                    df[col] = df[col].fillna(fallback)

    def _clip_outliers(self) -> int:
        """Clip numeric values outside the 1.5*IQR fences to the fence values.

        Returns the number of values that were clipped.
        """
        clipped = 0
        for col in self.cleaned_data.select_dtypes(include=[np.number]).columns:
            series = self.cleaned_data[col]
            q1 = series.quantile(0.25)
            q3 = series.quantile(0.75)
            iqr = q3 - q1
            lower = q1 - 1.5 * iqr
            upper = q3 + 1.5 * iqr
            clipped += int(((series < lower) | (series > upper)).sum())
            self.cleaned_data[col] = series.clip(lower=lower, upper=upper)
        return clipped

    def statistical_analysis(self) -> Dict[str, Any]:
        """Descriptive statistics, correlations, and categorical frequencies.

        Stores the result in self.analysis_results and returns it.
        """
        if self.cleaned_data is None:
            return {"error": "请先进行数据清洗"}

        try:
            results: Dict[str, Any] = {}

            results["basic_stats"] = self._json_safe(self.cleaned_data.describe().to_dict())

            # Correlation only makes sense with >= 2 numeric columns.
            numeric_cols = self.cleaned_data.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 1:
                results["correlation"] = self._json_safe(
                    self.cleaned_data[numeric_cols].corr().to_dict()
                )

            results["categorical_stats"] = {}
            for col in self.cleaned_data.select_dtypes(include=['object']).columns:
                value_counts = self.cleaned_data[col].value_counts()
                results["categorical_stats"][col] = {
                    # int() conversion: value_counts() yields np.int64
                    "value_counts": {str(k): int(v) for k, v in value_counts.items()},
                    "unique_count": int(len(value_counts)),
                    "most_frequent": value_counts.index[0] if len(value_counts) > 0 else None,
                }

            self.analysis_results = results
            self._auto_save_results()
            return {"success": True, "results": results, "auto_saved": True}
        except Exception as e:
            return {"error": f"统计分析失败: {str(e)}"}

    def cluster_analysis(self, n_clusters: int = 3,
                         features: Optional[List[str]] = None) -> Dict[str, Any]:
        """K-means clustering over (standardized) numeric features.

        Args:
            n_clusters: number of clusters.
            features: columns to cluster on; defaults to all numeric columns.

        Side effect: adds a 'cluster' column to self.cleaned_data and stores
        the profile summary in self.cluster_results.
        """
        if self.cleaned_data is None:
            return {"error": "请先进行数据清洗"}

        try:
            if features is None:
                numeric_cols = self.cleaned_data.select_dtypes(include=[np.number]).columns
                if len(numeric_cols) == 0:
                    return {"error": "没有可用于聚类的数值特征"}
                features = list(numeric_cols)

            cluster_data = self.cleaned_data[features].copy()

            # Standardize so each feature contributes equally to distances.
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(cluster_data)

            kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
            cluster_labels = kmeans.fit_predict(scaled_data)

            self.cleaned_data['cluster'] = cluster_labels

            # Centers mapped back to the original feature scale.
            cluster_centers = scaler.inverse_transform(kmeans.cluster_centers_)

            categorical_cols = [
                c for c in self.cleaned_data.select_dtypes(include=['object']).columns
                if c != 'cluster'
            ]

            cluster_profiles: Dict[str, Any] = {}
            for i in range(n_clusters):
                mask = cluster_labels == i
                subset = self.cleaned_data[mask]
                cluster_profiles[f"cluster_{i}"] = {
                    "size": int(mask.sum()),
                    "percentage": float(mask.sum() / len(self.cleaned_data) * 100),
                    "center": {feat: float(c) for feat, c in zip(features, cluster_centers[i])},
                    "stats": self._json_safe(subset[features].describe().to_dict()),
                    "categorical_distribution": {
                        col: self._json_safe(subset[col].value_counts(normalize=True).to_dict())
                        for col in categorical_cols
                    },
                }

            # Silhouette score: clustering quality in [-1, 1], higher is better.
            from sklearn.metrics import silhouette_score
            silhouette_avg = silhouette_score(scaled_data, cluster_labels)

            self.cluster_results = {
                "n_clusters": n_clusters,
                "features_used": features,
                "silhouette_score": float(silhouette_avg),
                "cluster_profiles": cluster_profiles,
            }

            self._auto_save_results()
            return {"success": True, "results": self.cluster_results, "auto_saved": True}
        except Exception as e:
            return {"error": f"聚类分析失败: {str(e)}"}

    def generate_visualization(self, chart_type: str, **kwargs) -> Dict[str, Any]:
        """Render a chart, save it as PNG under CHARTS_DIR, return it base64-encoded.

        Supported chart_type values: "correlation_heatmap", "cluster_scatter"
        (optional `features` list kwarg), "distribution" (`column` kwarg).
        """
        if self.cleaned_data is None:
            return {"error": "请先进行数据清洗"}

        try:
            plt.figure(figsize=(10, 6))

            if chart_type == "correlation_heatmap":
                numeric_cols = self.cleaned_data.select_dtypes(include=[np.number]).columns
                if len(numeric_cols) < 2:
                    return {"error": "需要至少2个数值列才能生成相关性热力图"}
                correlation_matrix = self.cleaned_data[numeric_cols].corr()
                sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0)
                plt.title('特征相关性热力图')

            elif chart_type == "cluster_scatter":
                if 'cluster' not in self.cleaned_data.columns:
                    return {"error": "请先进行聚类分析"}
                features = kwargs.get('features', [])
                if len(features) < 2:
                    # Fall back to the first two numeric columns.
                    numeric_cols = self.cleaned_data.select_dtypes(include=[np.number]).columns
                    features = list(numeric_cols)[:2]
                if len(features) < 2:
                    return {"error": "需要至少2个数值特征才能生成散点图"}
                scatter = plt.scatter(self.cleaned_data[features[0]],
                                      self.cleaned_data[features[1]],
                                      c=self.cleaned_data['cluster'],
                                      cmap='viridis', alpha=0.6)
                plt.xlabel(features[0])
                plt.ylabel(features[1])
                plt.title('聚类散点图')
                plt.colorbar(scatter)

            elif chart_type == "distribution":
                column = kwargs.get('column')
                if not column or column not in self.cleaned_data.columns:
                    return {"error": "请指定有效的列名"}
                if self.cleaned_data[column].dtype in ('int64', 'float64'):
                    plt.hist(self.cleaned_data[column], bins=30, alpha=0.7, edgecolor='black')
                else:
                    value_counts = self.cleaned_data[column].value_counts()
                    plt.bar(range(len(value_counts)), value_counts.values)
                    plt.xticks(range(len(value_counts)), value_counts.index, rotation=45)
                plt.xlabel(column)
                plt.ylabel('频次')
                plt.title(f'{column} 分布图')

            else:
                return {"error": f"不支持的图表类型: {chart_type}"}

            plt.tight_layout()

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_path = os.path.join(CHARTS_DIR, f"{chart_type}_{timestamp}.png")

            # Render once to disk and reuse the file bytes for the base64
            # payload (the original rendered the whole figure twice).
            plt.savefig(file_path, format='png', dpi=300, bbox_inches='tight')
            self.saved_files.append(file_path)
            with open(file_path, 'rb') as fh:
                image_base64 = base64.b64encode(fh.read()).decode()

            self._auto_save_results()

            return {
                "success": True,
                "chart_type": chart_type,
                "image_base64": image_base64,
                "saved_path": file_path,
                "auto_saved": True,
            }
        except Exception as e:
            return {"error": f"可视化生成失败: {str(e)}"}
        finally:
            # Always release the figure — the original leaked it on the
            # early-return error paths above.
            plt.close()

    def generate_user_profile(self) -> Dict[str, Any]:
        """Build an aggregate user-group profile with key insights.

        Returns {"success": True, "profile": {...}} or {"error": ...}.
        """
        if self.cleaned_data is None:
            return {"error": "请先进行数据清洗"}

        try:
            df = self.cleaned_data
            profile: Dict[str, Any] = {
                "data_overview": {
                    "total_samples": len(df),
                    "features_count": len(df.columns),
                    "data_types": {str(k): int(v) for k, v in df.dtypes.value_counts().items()},
                },
                "statistical_summary": {},
                "key_insights": [],
            }

            # Numeric feature summary.
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            if len(numeric_cols) > 0:
                numeric_summary = {}
                for col in numeric_cols:
                    stats = df[col].describe()
                    numeric_summary[col] = {
                        "mean": float(stats['mean']),
                        "std": float(stats['std']),
                        "min": float(stats['min']),
                        "max": float(stats['max']),
                        "median": float(stats['50%']),
                    }
                profile["statistical_summary"]["numeric_features"] = numeric_summary

            # Categorical feature summary (the synthetic 'cluster' column is skipped).
            categorical_cols = df.select_dtypes(include=['object']).columns
            if len(categorical_cols) > 0:
                categorical_summary = {}
                for col in categorical_cols:
                    if col == 'cluster':
                        continue
                    value_counts = df[col].value_counts()
                    categorical_summary[col] = {
                        "unique_count": int(len(value_counts)),
                        "most_common": value_counts.index[0] if len(value_counts) > 0 else None,
                        "most_common_percentage": float(value_counts.iloc[0] / len(df) * 100) if len(value_counts) > 0 else 0,
                        "distribution": {str(k): int(v) for k, v in value_counts.head(5).items()},
                    }
                profile["statistical_summary"]["categorical_features"] = categorical_summary

            # Cluster membership summary, when clustering has been run.
            if 'cluster' in df.columns:
                cluster_dist = df['cluster'].value_counts().sort_index()
                profile["cluster_distribution"] = {
                    "cluster_counts": {int(k): int(v) for k, v in cluster_dist.items()},
                    "cluster_percentages": {int(k): float(v) for k, v in (cluster_dist / len(df) * 100).items()},
                }

            insights = []

            if len(df) < 100:
                insights.append("样本量较小，分析结果可能存在一定局限性")
            elif len(df) > 1000:
                insights.append("样本量充足，分析结果具有较高可信度")

            # Missing ratio is measured on the raw data; fall back to the
            # cleaned frame if raw data is unavailable (original code would
            # raise AttributeError when self.data is None).
            source = self.data if self.data is not None else df
            missing_ratio = source.isnull().sum().sum() / (source.shape[0] * source.shape[1])
            if missing_ratio > 0.1:
                insights.append(f"数据缺失率为{missing_ratio:.1%}，建议关注数据质量")

            if len(numeric_cols) > len(categorical_cols):
                insights.append("数值特征较多，适合进行统计分析和聚类")
            elif len(categorical_cols) > len(numeric_cols):
                insights.append("分类特征较多，适合进行分组分析")

            profile["key_insights"] = insights
            return {"success": True, "profile": profile}
        except Exception as e:
            return {"error": f"用户画像生成失败: {str(e)}"}

    @staticmethod
    def _dump_json(path: str, obj: Any) -> None:
        """Write *obj* as pretty-printed UTF-8 JSON to *path*."""
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(obj, f, ensure_ascii=False, indent=2)

    def _write_results(self, timestamp: str, save_data: bool, skip_seen: bool) -> List[str]:
        """Write all current results to disk; return the newly written paths.

        Shared by _auto_save_results() and save_analysis_results(), which
        previously duplicated this logic line for line.

        Args:
            timestamp: suffix used in every file name.
            save_data: also write raw/cleaned CSVs.
            skip_seen: skip paths already recorded in self.saved_files
                (best-effort dedup used by the silent auto-save path).
        """
        written: List[str] = []

        def emit(path: str, writer) -> None:
            # writer is a callable taking the target path.
            if skip_seen and path in self.saved_files:
                return
            writer(path)
            self.saved_files.append(path)
            written.append(path)

        if self.analysis_results:
            emit(os.path.join(REPORTS_DIR, f"statistical_analysis_{timestamp}.json"),
                 lambda p: self._dump_json(p, self.analysis_results))

        if self.cluster_results:
            emit(os.path.join(REPORTS_DIR, f"cluster_analysis_{timestamp}.json"),
                 lambda p: self._dump_json(p, self.cluster_results))

        profile_result = self.generate_user_profile()
        if profile_result.get('success'):
            emit(os.path.join(REPORTS_DIR, f"user_profile_{timestamp}.json"),
                 lambda p: self._dump_json(p, profile_result['profile']))

        if save_data:
            if self.data is not None:
                emit(os.path.join(DATA_DIR, f"raw_data_{timestamp}.csv"),
                     lambda p: self.data.to_csv(p, index=False, encoding='utf-8-sig'))
            if self.cleaned_data is not None:
                emit(os.path.join(DATA_DIR, f"cleaned_data_{timestamp}.csv"),
                     lambda p: self.cleaned_data.to_csv(p, index=False, encoding='utf-8-sig'))

        summary_report = self._generate_summary_report(timestamp)
        emit(os.path.join(REPORTS_DIR, f"analysis_summary_{timestamp}.md"),
             lambda p: open(p, 'w', encoding='utf-8').write(summary_report) and None)

        return written

    def _auto_save_results(self) -> None:
        """Silently persist current results; never raises.

        Failures are swallowed on purpose: persistence must not break the
        analysis call that triggered it.
        """
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            self._write_results(timestamp, save_data=True, skip_seen=True)
        except Exception:
            pass

    def save_analysis_results(self, save_data: bool = True) -> Dict[str, Any]:
        """Explicitly persist all results and report what was written.

        Args:
            save_data: also save the raw and cleaned datasets as CSV.

        Returns:
            {"success": True, "saved_files": [...], "output_directory": ...,
             "timestamp": ...} or {"error": ...}.
        """
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            saved_files = self._write_results(timestamp, save_data=save_data, skip_seen=False)
            return {
                "success": True,
                "saved_files": saved_files,
                "output_directory": OUTPUT_DIR,
                "timestamp": timestamp,
            }
        except Exception as e:
            return {"error": f"保存分析结果失败: {str(e)}"}

    def _generate_summary_report(self, timestamp: str) -> str:
        """Build the Markdown summary report covering every analysis run so far."""
        report = f"""# 问卷调查数据分析报告

**生成时间**: {datetime.now().strftime('%Y年%m月%d日 %H:%M:%S')}
**分析标识**: {timestamp}

## 数据概览
"""

        if self.cleaned_data is not None:
            report += f"""
- **样本数量**: {len(self.cleaned_data)}
- **特征数量**: {len(self.cleaned_data.columns)}
- **数据维度**: {self.cleaned_data.shape[0]} × {self.cleaned_data.shape[1]}
"""

        # Statistical analysis section.
        if self.analysis_results:
            report += "\n## 统计分析结果\n\n"

            if 'basic_stats' in self.analysis_results:
                report += "### 数值特征基本统计\n\n"
                for col, stats in self.analysis_results['basic_stats'].items():
                    if isinstance(stats, dict) and 'mean' in stats:
                        report += f"**{col}**:\n"
                        report += f"- 均值: {stats['mean']:.2f}\n"
                        report += f"- 标准差: {stats['std']:.2f}\n"
                        report += f"- 最小值: {stats['min']:.2f}\n"
                        report += f"- 最大值: {stats['max']:.2f}\n\n"

            if 'categorical_stats' in self.analysis_results:
                report += "### 分类特征统计\n\n"
                for col, stats in self.analysis_results['categorical_stats'].items():
                    report += f"**{col}**:\n"
                    report += f"- 唯一值数量: {stats['unique_count']}\n"
                    report += f"- 最频繁类别: {stats['most_frequent']}\n\n"

        # Clustering section.
        if self.cluster_results:
            report += "\n## 聚类分析结果\n\n"
            report += f"- **聚类数量**: {self.cluster_results['n_clusters']}\n"
            report += f"- **轮廓系数**: {self.cluster_results['silhouette_score']:.3f}\n"
            report += f"- **使用特征**: {', '.join(self.cluster_results['features_used'])}\n\n"

            for cluster_name, profile in self.cluster_results['cluster_profiles'].items():
                report += f"### {cluster_name.replace('_', ' ').title()}\n\n"
                report += f"- **群体大小**: {profile['size']} ({profile['percentage']:.1f}%)\n"
                report += "- **特征中心点**:\n"
                for feat, value in profile['center'].items():
                    report += f"  - {feat}: {value:.2f}\n"
                report += "\n"

        report += "\n## 文件说明\n\n"
        report += "本次分析生成的文件包括:\n"
        report += "- `charts/`: 可视化图表文件\n"
        report += "- `reports/`: 分析报告和结果文件\n"
        report += "- `data/`: 原始数据和清洗后的数据文件\n\n"
        report += "---\n*报告由 MCP Survey Analyzer 自动生成*"

        return report

# Module-level analyzer instance shared by all MCP tools below; it keeps
# state (data, results, saved files) across tool calls within one session.
analyzer = SurveyAnalyzer()

# MCP工具定义
@mcp.tool()
def load_survey_data(file_path: str) -> str:
    """Load a survey data file (CSV or Excel).

    Args:
        file_path: path to the data file.

    Returns:
        JSON string with the load result and basic dataset info.
    """
    return json.dumps(analyzer.load_data(file_path), ensure_ascii=False, indent=2)

@mcp.tool()
def clean_survey_data(remove_duplicates: bool = False) -> str:
    """Clean the survey data: handle missing values, duplicates and outliers.

    Args:
        remove_duplicates: drop duplicate rows when True (default False,
            duplicates are kept).

    Returns:
        JSON string with the cleaning results and statistics.
    """
    outcome = analyzer.clean_data(remove_duplicates=remove_duplicates)
    return json.dumps(outcome, ensure_ascii=False, indent=2)

@mcp.tool()
def analyze_survey_statistics() -> str:
    """Run descriptive statistics and correlation analysis on the survey data.

    Returns:
        JSON string with the statistical analysis results.
    """
    return json.dumps(analyzer.statistical_analysis(), ensure_ascii=False, indent=2)

@mcp.tool()
def perform_cluster_analysis(n_clusters: int = 3, features: Optional[str] = None) -> str:
    """Cluster the survey data to identify user segments.

    Args:
        n_clusters: number of clusters (default 3).
        features: comma-separated feature column names; all numeric columns
            are used when omitted.

    Returns:
        JSON string with cluster profiles and quality metrics.
    """
    feature_list = [name.strip() for name in features.split(',')] if features else None
    outcome = analyzer.cluster_analysis(n_clusters, feature_list)
    return json.dumps(outcome, ensure_ascii=False, indent=2)

@mcp.tool()
def create_visualization(chart_type: str, column: Optional[str] = None, features: Optional[str] = None) -> str:
    """Generate a data visualization chart.

    Args:
        chart_type: one of correlation_heatmap, cluster_scatter, distribution.
        column: column name for distribution charts.
        features: comma-separated feature names for scatter charts.

    Returns:
        JSON string including the base64-encoded image.
    """
    options = {}
    if column:
        options['column'] = column
    if features:
        options['features'] = [name.strip() for name in features.split(',')]
    return json.dumps(analyzer.generate_visualization(chart_type, **options),
                      ensure_ascii=False, indent=2)

@mcp.tool()
def generate_user_profile() -> str:
    """Generate a user-group profile with key insights.

    Returns:
        JSON string with the profile analysis result.
    """
    return json.dumps(analyzer.generate_user_profile(), ensure_ascii=False, indent=2)

@mcp.tool()
def save_analysis_to_local(save_data: bool = True) -> str:
    """Persist all analysis results to local files.

    Args:
        save_data: also save the raw and cleaned datasets (default True).

    Returns:
        JSON string with the saved file paths.
    """
    return json.dumps(analyzer.save_analysis_results(save_data), ensure_ascii=False, indent=2)

@mcp.tool()
def get_saved_files_info() -> str:
    """Report the files saved so far and the output directory layout.

    Returns:
        JSON string with saved file paths and directory locations.
    """
    summary = {
        "saved_files_count": len(analyzer.saved_files),
        "saved_files": analyzer.saved_files,
        "output_directory": OUTPUT_DIR,
        "charts_directory": CHARTS_DIR,
        "reports_directory": REPORTS_DIR,
        "data_directory": DATA_DIR,
    }
    return json.dumps(summary, ensure_ascii=False, indent=2)

@mcp.tool()
def set_custom_output_directory(directory_path: str) -> str:
    """Redirect all output files to a custom directory.

    Args:
        directory_path: absolute path of the desired output directory.

    Returns:
        JSON string describing the outcome and the resulting directory layout.
    """
    try:
        if set_output_directory(directory_path):
            payload = {
                "success": True,
                "message": f"输出目录已设置为: {directory_path}",
                "output_directory": OUTPUT_DIR,
                "charts_directory": CHARTS_DIR,
                "reports_directory": REPORTS_DIR,
                "data_directory": DATA_DIR,
            }
        else:
            payload = {
                "success": False,
                "error": "设置输出目录失败，请检查路径是否有效",
            }
    except Exception as e:
        payload = {
            "success": False,
            "error": f"设置输出目录时发生错误: {str(e)}",
        }
    return json.dumps(payload, ensure_ascii=False, indent=2)

@mcp.tool()
def get_analysis_summary() -> str:
    """Return a combined summary of all analyses performed so far.

    Returns:
        JSON string with data info, analysis flags and cached results.
    """
    if analyzer.cleaned_data is None:
        return json.dumps({"error": "请先加载和清洗数据"}, ensure_ascii=False)

    summary: Dict[str, Any] = {
        "data_info": {
            "shape": analyzer.cleaned_data.shape,
            "columns": list(analyzer.cleaned_data.columns),
        },
        "has_statistical_analysis": bool(analyzer.analysis_results),
        "has_cluster_analysis": bool(analyzer.cluster_results),
        "analysis_timestamp": datetime.now().isoformat(),
        "saved_files_count": len(analyzer.saved_files),
        "output_directory": OUTPUT_DIR,
    }
    if analyzer.analysis_results:
        summary["statistical_results"] = analyzer.analysis_results
    if analyzer.cluster_results:
        summary["cluster_results"] = analyzer.cluster_results

    return json.dumps(summary, ensure_ascii=False, indent=2)

# MCP资源定义
@mcp.resource("survey://data/{data_type}")
def get_survey_data_resource(data_type: str) -> str:
    """Expose analyzer state as an MCP resource.

    Args:
        data_type: one of raw, cleaned, analysis, clusters.

    Returns:
        The requested data as JSON, or an error payload for unknown types
        (or when the requested DataFrame has not been produced yet).
    """
    if data_type == "raw":
        if analyzer.data is not None:
            return analyzer.data.to_json(orient='records', force_ascii=False)
    elif data_type == "cleaned":
        if analyzer.cleaned_data is not None:
            return analyzer.cleaned_data.to_json(orient='records', force_ascii=False)
    elif data_type == "analysis":
        return json.dumps(analyzer.analysis_results, ensure_ascii=False, indent=2)
    elif data_type == "clusters":
        return json.dumps(analyzer.cluster_results, ensure_ascii=False, indent=2)
    return json.dumps({"error": f"未找到数据类型: {data_type}"}, ensure_ascii=False)

if __name__ == "__main__":
    # Serve over stdio so MCP clients can spawn this file as a subprocess.
    mcp.run(transport="stdio")