from mcp.server.fastmcp import FastMCP
import numpy as np
import pandas as pd
from typing import List, Dict, Optional, Union
import json
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import seaborn as sns
from scipy.optimize import curve_fit
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import warnings
import base64
import io
from pathlib import Path
warnings.filterwarnings('ignore')

def set_plot_style():
    """Configure global matplotlib styling with graceful fallbacks.

    Tries a list of plot styles (newer seaborn style names first) and a list
    of CJK-capable fonts, applying the first of each that is actually
    available, then sets shared layout parameters.
    """
    # Style fallback: style names differ across matplotlib versions
    # ('seaborn-v0_8' in >=3.6, plain 'seaborn' before that).
    for style in ('seaborn-v0_8', 'seaborn', 'ggplot', 'default'):
        try:
            plt.style.use(style)
            break
        except OSError:
            continue

    # Font fallback. BUG FIX: assigning to rcParams never raises for a
    # missing font, so the original try/except loop always kept the first
    # candidate even when it was not installed. Query the font manager for
    # installed families instead, keeping the first candidate as a last
    # resort so behavior degrades no worse than before.
    from matplotlib import font_manager
    candidates = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans', 'Arial Unicode MS']
    installed = {f.name for f in font_manager.fontManager.ttflist}
    chosen = next((name for name in candidates if name in installed), candidates[0])
    plt.rcParams['font.sans-serif'] = [chosen]
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

    # Shared layout parameters.
    plt.rcParams['figure.autolayout'] = True  # auto-adjust layout
    plt.rcParams['axes.titlepad'] = 20  # title padding
    plt.rcParams['axes.labelpad'] = 10  # axis label padding

# Apply the matplotlib style/font configuration once at import time.
set_plot_style()

# Create the MCP server; the @mcp.tool() functions below register on it.
mcp = FastMCP("Population Prediction Server")

class PopulationPredictor:
    """Population forecasting toolkit.

    Bundles three forecasting models — linear regression, logistic growth,
    and the grey model GM(1,1) — plus an entropy-weight combiner, and keeps
    the cleaned data / fit results as shared state for the MCP tool layer.

    Attributes:
        models: reserved for fitted model objects (currently unused).
        data: cleaned DataFrame produced by preprocess_data (None until then).
        results: per-model result dicts, filled in by the tool functions.
        cleaning_report: summary of the most recent preprocess_data run.
    """

    def __init__(self):
        self.models = {}
        self.data = None
        self.results = {}
        self.cleaning_report = {}  # empty until preprocess_data runs

    @staticmethod
    def _metrics(actual, predicted):
        """Return (mse, r2) for a fit, computed with plain numpy.

        Equivalent to sklearn's mean_squared_error / r2_score with uniform
        weights; r2 is defined as 0.0 when the actual series is constant.
        """
        actual = np.asarray(actual, dtype=float)
        predicted = np.asarray(predicted, dtype=float)
        residual_ss = float(np.sum((actual - predicted) ** 2))
        mse = float(np.mean((actual - predicted) ** 2))
        total_ss = float(np.sum((actual - actual.mean()) ** 2))
        if total_ss == 0.0:
            return mse, 0.0
        return mse, 1.0 - residual_ss / total_ss

    def preprocess_data(self, data: List[Dict], year_col: str = 'year', pop_col: str = 'population'):
        """Clean raw records and store the result on ``self.data``.

        Pipeline: drop/coerce missing values, deduplicate, remove IQR
        outliers on the population column, downcast numeric dtypes, sort by
        year, and record a cleaning report on ``self.cleaning_report``.

        Args:
            data: list of dicts holding at least the year and population keys.
            year_col: name of the year column.
            pop_col: name of the population column.

        Returns:
            The cleaned, year-sorted DataFrame.

        Raises:
            ValueError: if fewer than 3 rows survive cleaning.
        """
        df = pd.DataFrame(data)
        original_rows = len(df)

        # 1. Missing values: count them for the report, then drop rows that
        # lack the two key columns or fail numeric coercion.
        missing_before = df.isnull().sum().sum()
        df = df.dropna(subset=[year_col, pop_col])
        df[year_col] = pd.to_numeric(df[year_col], errors='coerce')
        df[pop_col] = pd.to_numeric(df[pop_col], errors='coerce')
        df = df.dropna()

        # The frame is NaN-free after the dropna above, so the original
        # mean-imputation step was a no-op and has been removed.
        numeric_cols = df.select_dtypes(include=[np.number]).columns

        # 2. Duplicate rows.
        duplicates_before = df.duplicated().sum()
        df = df.drop_duplicates()

        # 3. Outlier removal on the population column via the IQR rule
        # (robust to non-normal data).
        outliers_removed = 0
        for col in [pop_col]:
            if col in df.columns:
                q1 = df[col].quantile(0.25)
                q3 = df[col].quantile(0.75)
                iqr = q3 - q1
                lower_bound = q1 - 1.5 * iqr
                upper_bound = q3 + 1.5 * iqr
                rows_before = len(df)
                df = df[(df[col] >= lower_bound) & (df[col] <= upper_bound)]
                outliers_removed += rows_before - len(df)

        # 4. Downcast numeric dtypes to save memory.
        for col in numeric_cols:
            if col in df.columns:
                if df[col].dtype == 'float64':
                    df[col] = pd.to_numeric(df[col], downcast='float')
                elif df[col].dtype == 'int64':
                    df[col] = pd.to_numeric(df[col], downcast='integer')

        # 5. Chronological order is assumed by every downstream model.
        df = df.sort_values(year_col)

        # 6. Sanity check: a meaningful fit needs at least 3 points.
        if len(df) < 3:
            raise ValueError(f"清洗后数据量不足（{len(df)}行），无法进行有效预测")

        final_rows = len(df)
        self.cleaning_report = {
            'original_rows': original_rows,
            'missing_values_handled': missing_before,
            'duplicates_removed': duplicates_before,
            'outliers_removed': outliers_removed,
            'final_rows': final_rows,
            'data_columns': len(df.columns),
            'numeric_columns': list(numeric_cols),
            'data_quality_score': round((final_rows / original_rows) * 100, 2)
        }
        self.data = df
        return df

    def get_cleaning_report(self):
        """Return the last cleaning report, or an error dict if none exists."""
        # cleaning_report is initialized to {} in __init__, so a truthiness
        # check replaces the fragile hasattr() probe used previously.
        if self.cleaning_report:
            return self.cleaning_report
        return {'error': '尚未进行数据预处理，请先调用preprocess_data方法'}

    def linear_regression(self, years: np.ndarray, population: np.ndarray):
        """Fit ordinary least squares: population = slope * year + intercept.

        Returns:
            dict with the fitted sklearn model, in-sample predictions, mse,
            r2 and the slope/intercept coefficients.
        """
        X = years.reshape(-1, 1)
        model = LinearRegression()
        model.fit(X, population)

        predictions = model.predict(X)
        mse, r2 = self._metrics(population, predictions)

        return {
            'model': model,
            'predictions': predictions,
            'mse': mse,
            'r2': r2,
            'coefficients': {'slope': model.coef_[0], 'intercept': model.intercept_}
        }

    def logistic_regression(self, years: np.ndarray, population: np.ndarray):
        """Fit the logistic growth model P(t) = K / (1 + exp(-r (t - t0))).

        Returns:
            dict with the model function, fitted parameters (K, r, t0),
            in-sample predictions, mse and r2 — or a dict with an 'error'
            key if the nonlinear fit fails to converge.
        """
        def logistic_func(t, K, r, t0):
            return K / (1 + np.exp(-r * (t - t0)))

        try:
            # Initial guesses: carrying capacity slightly above the observed
            # maximum, a modest growth rate, inflection at the mean year.
            p0 = [population.max() * 1.2, 0.1, years.mean()]
            popt, _ = curve_fit(logistic_func, years, population,
                                p0=p0, maxfev=5000)

            predictions = logistic_func(years, *popt)
            mse, r2 = self._metrics(population, predictions)

            return {
                'model': logistic_func,
                'parameters': {'K': popt[0], 'r': popt[1], 't0': popt[2]},
                'predictions': predictions,
                'mse': mse,
                'r2': r2
            }
        except Exception as e:
            return {'error': f'Logistic模型拟合失败: {str(e)}'}

    def grey_model(self, population: np.ndarray):
        """Fit a grey forecasting model GM(1,1) to the series.

        Builds the accumulated (cumsum) series, estimates the development
        coefficient ``a`` and grey input ``b`` by least squares, and restores
        in-sample predictions by first-differencing the fitted time-response
        function x1_hat(k) = (x0(0) - b/a) * exp(-a k) + b/a.

        Returns:
            dict with parameters {a, b}, in-sample predictions, mse and r2,
            or a dict with an 'error' key on failure.
        """
        n = len(population)
        if n < 4:
            return {'error': 'GM(1,1)模型需要至少4个数据点'}

        # Accumulated generating operation (1-AGO).
        x1 = np.cumsum(population)

        # Background values z1(k) = (x1(k-1) + x1(k)) / 2 (negated) plus a
        # constant column form the regression design matrix.
        B = np.column_stack((-(x1[:-1] + x1[1:]) / 2.0, np.ones(n - 1)))
        Y = population[1:].reshape(-1, 1)

        try:
            # BUG FIX: solve the least-squares problem with lstsq instead of
            # forming the normal equations with an explicit matrix inverse,
            # which is numerically unstable and fails on singular B.T @ B.
            params, *_ = np.linalg.lstsq(B, Y, rcond=None)
            a, b = params[0, 0], params[1, 0]

            # Fitted accumulated series, then restore the original series by
            # first differences (x0_hat(k) = x1_hat(k) - x1_hat(k-1); note
            # x1_hat(0) == population[0] exactly).
            k_idx = np.arange(n)
            x1_hat = (population[0] - b / a) * np.exp(-a * k_idx) + b / a
            predictions = np.empty(n)
            predictions[0] = population[0]
            predictions[1:] = np.diff(x1_hat)

            mse, r2 = self._metrics(population, predictions)

            return {
                'parameters': {'a': a, 'b': b},
                'predictions': predictions,
                'mse': mse,
                'r2': r2
            }
        except Exception as e:
            return {'error': f'GM(1,1)模型计算失败: {str(e)}'}

    def entropy_weight_combination(self, predictions_dict: Dict):
        """Combine model predictions using entropy-derived weights.

        Weights come from two performance indicators per model — R² (higher
        is better) and inverted MSE (higher is better) — via the
        entropy-weight method:
          entropy:  e_j = -k * Σ(p_ij * ln(p_ij))
          weight:   w_j = (1 - e_j) / (m - Σe_j)

        Args:
            predictions_dict: mapping model name -> result dict containing
                'predictions', 'r2' and 'mse' (error dicts are skipped).

        Returns:
            dict with per-model weights, the combined prediction series and
            a performance analysis, or a dict with an 'error' key when fewer
            than two models are usable.
        """
        valid_models = {k: v for k, v in predictions_dict.items()
                        if 'error' not in v and 'predictions' in v}

        if len(valid_models) < 2:
            return {'error': '需要至少2个有效模型进行组合'}

        model_names = list(valid_models.keys())

        # Indicator values: R² (larger is better) and inverted MSE so both
        # indicators point the same way.
        r2_scores = np.array([valid_models[name]['r2'] for name in model_names])
        mse_values = np.array([valid_models[name]['mse'] for name in model_names])
        mse_inverse = 1.0 / (mse_values + 1e-10)  # guard against division by zero

        # Decision matrix: rows are indicators, columns are models.
        decision_matrix = np.array([r2_scores, mse_inverse])
        m, n = decision_matrix.shape  # m indicators, n models

        # Row-normalize so each indicator's values sum to 1.
        normalized_matrix = np.zeros_like(decision_matrix)
        for i in range(m):
            row_sum = decision_matrix[i, :].sum()
            if row_sum > 0:
                normalized_matrix[i, :] = decision_matrix[i, :] / row_sum
            else:
                normalized_matrix[i, :] = 1.0 / n  # degenerate row: uniform

        # Entropy of each indicator across the models.
        k = 1.0 / np.log(n) if n > 1 else 1.0
        entropy = np.zeros(m)
        for i in range(m):
            p = normalized_matrix[i, :]
            p = p[p > 1e-10]  # skip zeros to avoid log(0)
            if len(p) > 0:
                entropy[i] = -k * np.sum(p * np.log(p))

        # Indicator weights: lower entropy (more discriminating indicator)
        # receives more weight.
        entropy_sum = entropy.sum()
        if entropy_sum < m:
            indicator_weights = (1 - entropy) / (m - entropy_sum)
        else:
            indicator_weights = np.ones(m) / m  # all indicators equally flat

        # Composite score per model, then normalize into model weights.
        model_scores = indicator_weights @ normalized_matrix
        model_weights = model_scores / model_scores.sum()

        # Weighted average of the per-model prediction series.
        predictions_matrix = np.array([valid_models[name]['predictions'] for name in model_names]).T
        combined_predictions = np.sum(predictions_matrix * model_weights, axis=1)

        return {
            'weights': dict(zip(model_names, model_weights)),
            'predictions': combined_predictions,
            'models_used': model_names,
            'performance_analysis': {
                'r2_scores': dict(zip(model_names, r2_scores)),
                'mse_values': dict(zip(model_names, mse_values)),
                'indicator_weights': {
                    'r2_weight': float(indicator_weights[0]),
                    'mse_weight': float(indicator_weights[1])
                },
                'model_scores': dict(zip(model_names, model_scores))
            }
        }

# Module-level predictor shared by every MCP tool (single-session state).
predictor = PopulationPredictor()

@mcp.tool()
def preprocess_population_data(data: List[Dict[str, Union[int, float]]], 
                              year_column: str = "year", 
                              population_column: str = "population") -> str:
    """Preprocess population data.

    Args:
        data: list of dicts holding year and population values
        year_column: name of the year column
        population_column: name of the population column

    Returns:
        JSON string describing the preprocessing outcome
    """
    try:
        df = predictor.preprocess_data(data, year_column, population_column)

        # Cleaning summary produced by the preprocessing step
        cleaning_report = predictor.get_cleaning_report()

        # Make the data sample JSON-safe.
        # BUG FIX: test for NaN *before* unwrapping numpy scalars — a numpy
        # NaN has .item(), so the original order converted it to float('nan')
        # and json.dumps then emitted the non-standard token NaN.
        data_sample = df.head().to_dict('records')
        for record in data_sample:
            for key, value in record.items():
                if pd.isna(value):
                    record[key] = None
                elif hasattr(value, 'item'):  # numpy scalar
                    record[key] = value.item()

        # Unwrap numpy scalars in the cleaning report as well.
        serializable_report = {}
        for key, value in cleaning_report.items():
            if hasattr(value, 'item'):  # numpy scalar
                serializable_report[key] = value.item()
            elif isinstance(value, list):
                serializable_report[key] = [v.item() if hasattr(v, 'item') else v for v in value]
            else:
                serializable_report[key] = value

        result = {
            "status": "success",
            "message": "数据预处理完成",
            "data_info": {
                "total_records": int(len(df)),
                "year_range": [int(df[year_column].min()), int(df[year_column].max())],
                "population_range": [float(df[population_column].min()), float(df[population_column].max())],
                "data_sample": data_sample
            },
            "cleaning_report": serializable_report
        }

        return json.dumps(result, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"数据预处理失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def get_data_cleaning_report() -> str:
    """Return the data cleaning report.

    Returns:
        JSON string with the cleaning report and a quality summary
    """
    try:
        cleaning_report = predictor.get_cleaning_report()

        if 'error' in cleaning_report:
            return json.dumps({
                "status": "error",
                "message": cleaning_report['error']
            }, ensure_ascii=False, indent=2)

        def to_native(value):
            # Unwrap numpy scalars (and lists of them) so json.dumps works.
            if hasattr(value, 'item'):
                return value.item()
            if isinstance(value, list):
                return [v.item() if hasattr(v, 'item') else v for v in value]
            return value

        serializable_report = {key: to_native(val) for key, val in cleaning_report.items()}

        score = serializable_report.get('data_quality_score', 0)
        if score >= 90:
            quality_label = "优秀"
        elif score >= 80:
            quality_label = "良好"
        elif score >= 70:
            quality_label = "一般"
        else:
            quality_label = "需要改进"

        issues_resolved = int(
            serializable_report.get('missing_values_handled', 0)
            + serializable_report.get('duplicates_removed', 0)
            + serializable_report.get('outliers_removed', 0)
        )

        result = {
            "status": "success",
            "message": "数据清洗报告获取成功",
            "cleaning_report": serializable_report,
            "summary": {
                "data_retention_rate": f"{score}%",
                "total_issues_resolved": issues_resolved,
                "final_data_quality": quality_label
            }
        }

        return json.dumps(result, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"获取清洗报告失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def predict_population_linear(predict_years: List[int]) -> str:
    """Predict population with a linear regression model.

    Args:
        predict_years: list of years to forecast

    Returns:
        JSON string with model performance and per-year predictions
    """
    try:
        if predictor.data is None:
            return json.dumps({
                "status": "error",
                "message": "请先调用preprocess_population_data进行数据预处理"
            }, ensure_ascii=False)

        frame = predictor.data
        hist_years = frame.iloc[:, 0].values  # first column: year
        hist_pop = frame.iloc[:, 1].values  # second column: population

        # Fit on the historical series
        fit = predictor.linear_regression(hist_years, hist_pop)

        # Extrapolate to the requested years
        future = fit['model'].predict(np.array(predict_years).reshape(-1, 1))

        # Cache the in-sample fit for the combination/report tools
        predictor.results['linear'] = fit

        payload = {
            "status": "success",
            "model": "线性回归",
            "model_performance": {
                "mse": float(fit['mse']),
                "r2_score": float(fit['r2']),
                "coefficients": fit['coefficients']
            },
            "predictions": {
                str(year): float(value) for year, value in zip(predict_years, future)
            }
        }

        return json.dumps(payload, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"线性回归预测失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def predict_population_logistic(predict_years: List[int]) -> str:
    """Predict population with a logistic growth model.

    Args:
        predict_years: list of years to forecast

    Returns:
        JSON string with model performance and per-year predictions
    """
    try:
        if predictor.data is None:
            return json.dumps({
                "status": "error",
                "message": "请先调用preprocess_population_data进行数据预处理"
            }, ensure_ascii=False)

        frame = predictor.data
        fit = predictor.logistic_regression(frame.iloc[:, 0].values,
                                            frame.iloc[:, 1].values)

        if 'error' in fit:
            return json.dumps({
                "status": "error",
                "message": fit['error']
            }, ensure_ascii=False)

        # Evaluate the fitted curve K / (1 + exp(-r * (t - t0))) at the
        # requested years.
        params = fit['parameters']
        K, r, t0 = params['K'], params['r'], params['t0']
        forecasts = [K / (1 + np.exp(-r * (year - t0))) for year in predict_years]

        # Cache the in-sample fit for the combination/report tools
        predictor.results['logistic'] = fit

        payload = {
            "status": "success",
            "model": "Logistic增长模型",
            "model_performance": {
                "mse": float(fit['mse']),
                "r2_score": float(fit['r2']),
                "parameters": {
                    "K": float(K),
                    "r": float(r),
                    "t0": float(t0)
                }
            },
            "predictions": {
                str(year): float(value) for year, value in zip(predict_years, forecasts)
            }
        }

        return json.dumps(payload, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"Logistic模型预测失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def predict_population_grey(predict_steps: int) -> str:
    """Forecast population with the grey model GM(1,1).

    Args:
        predict_steps: number of steps to forecast beyond the last data year

    Returns:
        JSON string with model performance and per-year predictions
    """
    try:
        if predictor.data is None:
            return json.dumps({
                "status": "error",
                "message": "请先调用preprocess_population_data进行数据预处理"
            }, ensure_ascii=False)
        
        df = predictor.data
        population = df.iloc[:, 1].values  # second column: population
        years = df.iloc[:, 0].values  # first column: year
        
        # Fit GM(1,1) on the historical series
        result = predictor.grey_model(population)
        
        if 'error' in result:
            return json.dumps({
                "status": "error",
                "message": result['error']
            }, ensure_ascii=False)
        
        # Extrapolate predict_steps ahead via the fitted time-response
        # function of the accumulated series:
        # x1_hat(k) = (x0(0) - b/a) * exp(-a k) + b/a
        a, b = result['parameters']['a'], result['parameters']['b']
        future_predictions = []
        
        for k in range(len(population), len(population) + predict_steps):
            x1_pred = (population[0] - b/a) * np.exp(-a * k) + b/a
            if k == len(population):
                # First future step: differenced against the ACTUAL cumulative
                # total of the observed series.
                # NOTE(review): standard GM(1,1) restoration differences
                # against the FITTED cumulative value x1_hat(k-1) here, as the
                # later steps do — mixing observed and fitted baselines looks
                # inconsistent; confirm this is intended.
                pred = x1_pred - np.sum(population)
            else:
                # Later steps: x0_hat(k) = x1_hat(k) - x1_hat(k-1).
                pred = x1_pred - (population[0] - b/a) * np.exp(-a * (k-1)) - b/a
            future_predictions.append(pred)
        
        # Forecast years continue directly after the last observed year
        future_years = [int(years[-1] + i + 1) for i in range(predict_steps)]
        
        # Cache the in-sample fit for the combination/report tools
        predictor.results['grey'] = result
        
        response = {
            "status": "success",
            "model": "灰色预测模型GM(1,1)",
            "model_performance": {
                "mse": float(result['mse']),
                "r2_score": float(result['r2']),
                "parameters": {
                    "a": float(a),
                    "b": float(b)
                }
            },
            "predictions": {
                str(year): float(pred) for year, pred in zip(future_years, future_predictions)
            }
        }
        
        return json.dumps(response, ensure_ascii=False, indent=2)
        
    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"灰色预测模型失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def combine_predictions_entropy() -> str:
    """Combine the fitted models' predictions with entropy-derived weights.

    Returns:
        JSON string with weights, the combined series, and diagnostics
    """
    try:
        if not predictor.results:
            return json.dumps({
                "status": "error",
                "message": "没有可用的预测结果，请先运行预测模型"
            }, ensure_ascii=False)

        # Delegate the weighting and combination to the predictor
        combined = predictor.entropy_weight_combination(predictor.results)

        if 'error' in combined:
            return json.dumps({
                "status": "error",
                "message": combined['error']
            }, ensure_ascii=False)

        payload = {
            "status": "success",
            "model": "熵权法组合模型",
            "models_used": combined['models_used'],
            "weights": {name: float(weight) for name, weight in combined['weights'].items()},
            "combined_predictions": [float(value) for value in combined['predictions']],
            "performance_analysis": combined.get('performance_analysis', {})
        }

        return json.dumps(payload, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"熵权法组合失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def generate_prediction_report() -> str:
    """Generate a prediction analysis report.

    Returns:
        JSON string with data overview, model comparison, and recommendations
    """
    try:
        if predictor.data is None:
            return json.dumps({
                "status": "error",
                "message": "没有数据可用于生成报告"
            }, ensure_ascii=False)

        df = predictor.data

        # BUG FIX: the average annual growth rate must be computed over the
        # spanned *years*, not the number of rows — with gaps in the data
        # (e.g. a census every 5 years) the row count understates the period.
        first_year, last_year = int(df.iloc[0, 0]), int(df.iloc[-1, 0])
        first_pop, last_pop = df.iloc[0, 1], df.iloc[-1, 1]
        year_span = last_year - first_year
        if year_span > 0 and first_pop > 0:
            avg_growth = ((last_pop / first_pop) ** (1 / year_span) - 1) * 100
        else:
            avg_growth = 0.0  # degenerate data: avoid division by zero

        # Basic statistics
        stats = {
            "数据概览": {
                "数据点数量": len(df),
                "年份范围": f"{int(df.iloc[:, 0].min())}-{int(df.iloc[:, 0].max())}",
                "人口范围": f"{df.iloc[:, 1].min():.0f}-{df.iloc[:, 1].max():.0f}",
                "平均年增长率": f"{avg_growth:.2f}%"
            }
        }

        # Model performance comparison (only successfully fitted models)
        if predictor.results:
            model_comparison = {}
            for model_name, result in predictor.results.items():
                if 'error' not in result:
                    model_comparison[model_name] = {
                        "R²得分": f"{result['r2']:.4f}",
                        "均方误差": f"{result['mse']:.2f}",
                        "模型质量": "优秀" if result['r2'] > 0.9 else "良好" if result['r2'] > 0.7 else "一般"
                    }
            stats["模型性能比较"] = model_comparison

        # Recommendations
        recommendations = []
        # BUG FIX: rank only models that fitted successfully; the original
        # max() could select a failed result dict (no 'r2' key) and the
        # subsequent lookup raised KeyError, turning a valid report into an
        # error response.
        valid_results = {k: v for k, v in predictor.results.items() if 'error' not in v}
        if valid_results:
            best_name, best_fit = max(valid_results.items(),
                                      key=lambda kv: kv[1].get('r2', 0))
            recommendations.append(f"推荐使用{best_name}模型，R²得分最高: {best_fit['r2']:.4f}")

        if len(df) < 10:
            recommendations.append("数据点较少，建议收集更多历史数据以提高预测精度")

        stats["使用建议"] = recommendations

        response = {
            "status": "success",
            "report_title": "人口预测分析报告",
            "analysis_results": stats,
            "generation_time": pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")
        }

        return json.dumps(response, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"报告生成失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def visualize_historical_trend() -> str:
    """Render the historical population series as a trend chart.

    Saves a PNG into a "图片" directory next to this file and also returns
    the image base64-encoded in the JSON payload.

    Returns:
        JSON string with the base64 image and the saved file path
    """
    try:
        if predictor.data is None:
            return json.dumps({
                "status": "error",
                "message": "请先调用preprocess_population_data进行数据预处理"
            }, ensure_ascii=False)
        
        df = predictor.data
        years = df.iloc[:, 0].values
        population = df.iloc[:, 1].values
        
        # Re-apply the shared style in case another tool changed it
        set_plot_style()
        fig, ax = plt.subplots(figsize=(14, 8))
        
        # Main data line
        ax.plot(years, population, 'o-', linewidth=3, markersize=8, 
                color='#2E86AB', alpha=0.8, label='历史人口数据')
        
        # Degree-1 polynomial trend line; z[0] is the per-year slope
        z = np.polyfit(years, population, 1)
        p = np.poly1d(z)
        ax.plot(years, p(years), "--", alpha=0.7, linewidth=2, 
                color='#F24236', label=f'线性趋势 (年增长: {z[0]:.1f}万人)')
        
        # Title and axis labels
        ax.set_title('历史人口数据趋势分析', fontsize=18, fontweight='bold', pad=20)
        ax.set_xlabel('年份', fontsize=14, fontweight='bold')
        ax.set_ylabel('人口数量 (万人)', fontsize=14, fontweight='bold')
        
        # Grid and background styling
        ax.grid(True, alpha=0.3, linestyle='-', linewidth=0.5)
        ax.set_facecolor('#f8f9fa')
        
        # Legend
        ax.legend(loc='upper left', fontsize=12, frameon=True, 
                 fancybox=True, shadow=True)
        
        # Tick formatting
        ax.tick_params(axis='both', which='major', labelsize=12)
        # Rotate x labels to avoid overlap
        plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
        
        # Inset box with the data ranges
        data_range = f"数据范围: {int(years.min())}-{int(years.max())}年\n" + \
                    f"人口范围: {population.min():.0f}-{population.max():.0f}万人"
        ax.text(0.02, 0.98, data_range, transform=ax.transAxes, 
                fontsize=10, verticalalignment='top',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
        
        plt.tight_layout()
        
        # Save the figure to a "图片" folder next to this file
        output_dir = Path(__file__).parent / "图片"
        output_dir.mkdir(parents=True, exist_ok=True)
        
        timestamp = pd.Timestamp.now().strftime("%Y%m%d_%H%M%S")
        filename = f"历史趋势图_{timestamp}.png"
        filepath = output_dir / filename
        
        plt.savefig(filepath, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        
        # Render again into memory for the base64 payload
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight', 
                   facecolor='white', edgecolor='none')
        buffer.seek(0)
        image_base64 = base64.b64encode(buffer.getvalue()).decode()
        plt.close()
        
        response = {
            "status": "success",
            "chart_type": "历史趋势图",
            "image_base64": image_base64,
            "description": "显示历史人口数据的时间序列趋势",
            "saved_file": str(filepath)
        }
        
        return json.dumps(response, ensure_ascii=False, indent=2)
        
    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"趋势图生成失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def visualize_model_comparison() -> str:
    """Render every fitted model against the historical data in one chart.

    Saves a PNG into a "图片" directory next to this file and also returns
    the image base64-encoded in the JSON payload.

    Returns:
        JSON string with the base64 image and the saved file path
    """
    try:
        if predictor.data is None or not predictor.results:
            return json.dumps({
                "status": "error",
                "message": "请先进行数据预处理和模型预测"
            }, ensure_ascii=False)
        
        df = predictor.data
        years = df.iloc[:, 0].values
        population = df.iloc[:, 1].values
        
        # Re-apply the shared style in case another tool changed it
        set_plot_style()
        fig, ax = plt.subplots(figsize=(18, 12))  # enlarged canvas
        
        # Historical data: scatter markers plus a faint connecting line
        ax.scatter(years, population, color='#2C3E50', s=80, alpha=0.8, 
                  label='历史数据', zorder=5, edgecolors='white', linewidth=1)
        ax.plot(years, population, color='#2C3E50', linewidth=2, alpha=0.6, zorder=4)
        
        # Per-model colors and line styles
        model_styles = {
            'linear': {'color': '#E74C3C', 'linestyle': '--', 'label': '线性回归'},
            'logistic': {'color': '#3498DB', 'linestyle': '-.', 'label': 'Logistic模型'},
            'grey': {'color': '#27AE60', 'linestyle': ':', 'label': '灰色预测GM(1,1)'}
        }
        
        # In-sample fit curve for each successfully fitted model
        for model_name, result in predictor.results.items():
            if 'error' not in result and 'predictions' in result:
                predictions = result['predictions']
                r2 = result['r2']
                mse = result['mse']
                
                # Unknown model names fall back to a purple solid line
                style = model_styles.get(model_name, {
                    'color': '#9B59B6', 'linestyle': '-', 'label': model_name
                })
                
                ax.plot(years, predictions, 
                       color=style['color'], 
                       linestyle=style['linestyle'],
                       linewidth=3, 
                       alpha=0.8,
                       label=f"{style['label']} (R²={r2:.4f}, MSE={mse:.0f})")
        
        # Title and axis labels
        ax.set_title('人口预测模型拟合效果对比', fontsize=18, fontweight='bold', pad=20)
        ax.set_xlabel('年份', fontsize=14, fontweight='bold')
        ax.set_ylabel('人口数量 (万人)', fontsize=14, fontweight='bold')
        
        # Grid and background styling
        ax.grid(True, alpha=0.3, linestyle='-', linewidth=0.5)
        ax.set_facecolor('#f8f9fa')
        
        # Legend
        ax.legend(loc='upper left', fontsize=12, frameon=True, 
                 fancybox=True, shadow=True, ncol=1)
        
        # Tick formatting
        ax.tick_params(axis='both', which='major', labelsize=11)
        
        # Corner box summarizing per-model R²
        stats_text = "模型性能统计:\n"
        for model_name, result in predictor.results.items():
            if 'error' not in result:
                stats_text += f"• {model_name}: R²={result['r2']:.4f}\n"
        
        ax.text(0.98, 0.02, stats_text, transform=ax.transAxes, 
                fontsize=10, verticalalignment='bottom', horizontalalignment='right',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.9))
        
        plt.tight_layout()
        
        # Save the figure to a "图片" folder next to this file
        output_dir = Path(__file__).parent / "图片"
        output_dir.mkdir(parents=True, exist_ok=True)
        
        timestamp = pd.Timestamp.now().strftime("%Y%m%d_%H%M%S")
        filename = f"模型对比图_{timestamp}.png"
        filepath = output_dir / filename
        
        plt.savefig(filepath, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        
        # Render again into memory for the base64 payload
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        buffer.seek(0)
        image_base64 = base64.b64encode(buffer.getvalue()).decode()
        plt.close()
        
        response = {
            "status": "success",
            "chart_type": "模型对比图",
            "image_base64": image_base64,
            "description": "比较不同预测模型的拟合效果",
            "saved_file": str(filepath)
        }
        
        return json.dumps(response, ensure_ascii=False, indent=2)
        
    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"模型对比图生成失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def visualize_model_performance() -> str:
    """Generate a model performance comparison chart.

    Builds a 2x2 grid figure from the results stored on the module-level
    ``predictor``: an R-squared bar chart, an MSE bar chart, and a summary
    table. The figure is rendered once, written to the local "图片"
    directory, and also embedded in the response as base64.

    Returns:
        JSON string with the base64-encoded PNG, a performance summary,
        and the saved file path; an error payload on failure.
    """
    try:
        if not predictor.results:
            return json.dumps({
                "status": "error",
                "message": "没有可用的模型结果"
            }, ensure_ascii=False)

        # Collect (name, label, r2, mse) for every model that produced
        # a result without an 'error' key.
        model_data = []
        model_labels = {
            'linear': '线性回归',
            'logistic': 'Logistic模型', 
            'grey': '灰色预测GM(1,1)'
        }

        for model_name, result in predictor.results.items():
            if 'error' not in result:
                model_data.append({
                    'name': model_name,
                    'label': model_labels.get(model_name, model_name),
                    'r2': result['r2'],
                    'mse': result['mse']
                })

        if not model_data:
            return json.dumps({
                "status": "error",
                "message": "没有有效的模型性能数据"
            }, ensure_ascii=False)

        # Re-apply the shared plotting style (fonts / layout defaults).
        set_plot_style()
        fig = plt.figure(figsize=(20, 10))

        # Grid layout: two bar charts on top, full-width table below.
        gs = fig.add_gridspec(2, 2, height_ratios=[3, 1], width_ratios=[1, 1],
                             hspace=0.3, wspace=0.2)
        ax1 = fig.add_subplot(gs[0, 0])
        ax2 = fig.add_subplot(gs[0, 1])
        ax3 = fig.add_subplot(gs[1, :])

        # Shared color palette for the (up to 5) models.
        colors = ['#3498DB', '#E74C3C', '#27AE60', '#F39C12', '#9B59B6']

        # --- R-squared bar chart ---
        model_names = [item['label'] for item in model_data]
        r2_scores = [item['r2'] for item in model_data]

        bars1 = ax1.bar(model_names, r2_scores,
                        color=colors[:len(model_data)], alpha=0.8,
                        edgecolor='white', linewidth=2)
        ax1.set_title('模型R²得分对比', fontsize=16, fontweight='bold', pad=15)
        ax1.set_ylabel('R²得分', fontsize=12, fontweight='bold')
        ax1.set_ylim(0, 1.05)
        ax1.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax1.set_facecolor('#f8f9fa')
        # Rotate x labels to avoid overlap with long model names.
        plt.setp(ax1.get_xticklabels(), rotation=45, ha='right')

        # Value labels plus a "best" marker on the highest R2 bar.
        best_r2_idx = r2_scores.index(max(r2_scores))
        for i, (bar, score) in enumerate(zip(bars1, r2_scores)):
            height = bar.get_height()
            ax1.text(bar.get_x() + bar.get_width()/2., height + 0.02,
                    f'{score:.4f}', ha='center', va='bottom',
                    fontsize=11, fontweight='bold')
            if i == best_r2_idx:
                ax1.text(bar.get_x() + bar.get_width()/2., height + 0.08,
                        '★ 最佳', ha='center', va='bottom',
                        fontsize=10, color='red', fontweight='bold')

        # --- MSE bar chart ---
        mse_values = [item['mse'] for item in model_data]
        bars2 = ax2.bar(model_names, mse_values,
                        color=colors[:len(model_data)], alpha=0.8,
                        edgecolor='white', linewidth=2)
        ax2.set_title('模型均方误差(MSE)对比', fontsize=16, fontweight='bold', pad=15)
        ax2.set_ylabel('MSE', fontsize=12, fontweight='bold')
        ax2.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax2.set_facecolor('#f8f9fa')
        # Rotate x labels to avoid overlap with long model names.
        plt.setp(ax2.get_xticklabels(), rotation=45, ha='right')

        # Value labels plus a "best" marker on the lowest MSE bar.
        best_mse_idx = mse_values.index(min(mse_values))
        for i, (bar, mse) in enumerate(zip(bars2, mse_values)):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height,
                    f'{mse:.0f}', ha='center', va='bottom',
                    fontsize=11, fontweight='bold')
            if i == best_mse_idx:
                ax2.text(bar.get_x() + bar.get_width()/2., height + max(mse_values) * 0.1,
                        '★ 最佳', ha='center', va='bottom',
                        fontsize=10, color='red', fontweight='bold')

        # --- Combined performance table ---
        ax3.axis('tight')
        ax3.axis('off')

        table_data = []
        for item in model_data:
            table_data.append([
                item['label'],
                f"{item['r2']:.4f}",
                f"{item['mse']:.0f}",
                "★" if item['r2'] == max(r2_scores) else "",
                "★" if item['mse'] == min(mse_values) else ""
            ])

        table = ax3.table(cellText=table_data,
                         colLabels=['模型名称', 'R²得分', 'MSE', 'R²最佳', 'MSE最佳'],
                         cellLoc='center',
                         loc='center',
                         colColours=['#E8F4FD'] * 5)

        table.auto_set_font_size(False)
        table.set_fontsize(14)
        table.scale(1.2, 2.5)

        # Style the table: bold white header row, zebra-striped body.
        for i in range(len(model_data) + 1):
            for j in range(5):
                cell = table[(i, j)]
                if i == 0:  # header row
                    cell.set_text_props(weight='bold')
                    cell.set_facecolor('#3498DB')
                    cell.set_text_props(color='white')
                else:
                    cell.set_facecolor('#f8f9fa' if i % 2 == 0 else 'white')

        ax3.set_title('模型性能综合对比表', fontsize=14, fontweight='bold', pad=20)

        plt.tight_layout()

        # Render the figure ONCE into an in-memory buffer and reuse the
        # same bytes for both the on-disk file and the base64 payload
        # (previously the figure was rasterized twice at 300 dpi).
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        plt.close()
        png_bytes = buffer.getvalue()

        # Persist the PNG next to this script, under "图片/".
        output_dir = Path(__file__).parent / "图片"
        output_dir.mkdir(parents=True, exist_ok=True)

        timestamp = pd.Timestamp.now().strftime("%Y%m%d_%H%M%S")
        filename = f"模型性能对比图_{timestamp}.png"
        filepath = output_dir / filename
        filepath.write_bytes(png_bytes)

        image_base64 = base64.b64encode(png_bytes).decode()

        response = {
            "status": "success",
            "chart_type": "模型性能对比图",
            "image_base64": image_base64,
            "description": "显示各模型的R²得分和MSE指标对比",
            "performance_summary": {
                "best_r2_model": model_data[best_r2_idx]['name'],
                "best_mse_model": model_data[best_mse_idx]['name'],
                "best_r2_score": max(r2_scores),
                "best_mse_value": min(mse_values)
            },
            "saved_file": str(filepath)
        }

        return json.dumps(response, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"性能对比图生成失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def visualize_prediction_forecast(predict_years: List[int]) -> str:
    """Generate a visualization of the population forecasts.

    Plots the historical series together with each fitted model's
    forecast for ``predict_years`` and appends a per-model prediction
    table. The figure is saved under the local "图片" directory and
    returned as base64 inside a JSON payload.

    Args:
        predict_years: Years to forecast. Assumed non-empty and in
            ascending order (the x-limits and the table title use the
            first/last entries directly) — TODO confirm with callers.

    Returns:
        JSON string with the base64 image, forecast data, and saved file
        path; an error payload on failure.
    """
    try:
        if predictor.data is None or not predictor.results:
            return json.dumps({
                "status": "error",
                "message": "请先进行数据预处理和模型预测"
            }, ensure_ascii=False)

        # Column 0 = year, column 1 = population (set by preprocessing).
        df = predictor.data
        years = df.iloc[:, 0].values
        population = df.iloc[:, 1].values

        set_plot_style()
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 14),
                                      gridspec_kw={'height_ratios': [3, 1], 'hspace': 0.3})

        # --- Main panel: historical data + per-model forecasts ---
        ax1.plot(years, population, 'o-', color='#2C3E50', linewidth=3,
                markersize=6, label='历史数据', alpha=0.9, zorder=5,
                markerfacecolor='white', markeredgewidth=2)

        # Per-model plot styling.
        model_styles = {
            'linear': {'color': '#E74C3C', 'marker': 's', 'linestyle': '--', 'label': '线性回归预测'},
            'logistic': {'color': '#27AE60', 'marker': '^', 'linestyle': '-.', 'label': 'Logistic增长预测'},
            'grey': {'color': '#F39C12', 'marker': 'D', 'linestyle': ':', 'label': '灰色预测GM(1,1)'}
        }

        # Collected forecasts, reused for the table panel below.
        prediction_data = {}

        for model_name, result in predictor.results.items():
            if 'error' in result:
                continue

            if model_name == 'linear':
                predict_X = np.array(predict_years).reshape(-1, 1)
                predictions = result['model'].predict(predict_X)
            elif model_name == 'logistic':
                def logistic_func(t, K, r, t0):
                    return K / (1 + np.exp(-r * (t - t0)))
                params = result['parameters']
                predictions = [logistic_func(year, params['K'], params['r'], params['t0'])
                             for year in predict_years]
            elif model_name == 'grey':
                # GM(1,1): reconstruct the cumulative series x1(k) from
                # parameters (a, b), then difference consecutive x1
                # values to recover per-year predictions.
                a, b = result['parameters']['a'], result['parameters']['b']
                predictions = []
                x1_cumsum = np.cumsum(population)

                for i, year in enumerate(predict_years):
                    k = len(population) + i
                    x1_pred = (population[0] - b/a) * np.exp(-a * k) + b/a

                    if i == 0:
                        # First forecast step: difference against the
                        # observed cumulative total.
                        pred = x1_pred - x1_cumsum[-1]
                    else:
                        prev_x1 = (population[0] - b/a) * np.exp(-a * (k-1)) + b/a
                        pred = x1_pred - prev_x1

                    predictions.append(max(pred, 0))  # clamp to non-negative
            else:
                # Unknown model name: skip it. The original if/elif chain
                # had no else, so `predictions` could be unbound or stale
                # from a previous iteration here.
                continue

            # Clamp every forecast to non-negative before plotting.
            predictions = [max(0, val) for val in predictions]
            prediction_data[model_name] = predictions

            # Bridge the last historical point to the first forecast
            # point so the line is visually continuous.
            connect_years = [years[-1]] + list(predict_years)
            connect_values = [population[-1]] + predictions

            style = model_styles.get(model_name, {
                'color': '#34495E', 'marker': 'o', 'linestyle': '-',
                'label': model_name
            })

            ax1.plot(connect_years, connect_values,
                    marker=style['marker'], linestyle=style['linestyle'],
                    color=style['color'], linewidth=2.5,
                    markersize=8, label=style['label'],
                    alpha=0.8, markerfacecolor='white', markeredgewidth=2)

        # Main-panel cosmetics.
        ax1.set_title('人口预测结果可视化分析', fontsize=18, fontweight='bold', pad=20)
        ax1.set_xlabel('年份', fontsize=14, fontweight='bold')
        ax1.set_ylabel('人口数量 (万人)', fontsize=14, fontweight='bold')
        ax1.legend(fontsize=12, loc='upper left', frameon=True,
                  fancybox=True, shadow=True, framealpha=0.9)
        ax1.grid(True, alpha=0.3, linestyle='--')
        ax1.set_facecolor('#f8f9fa')

        # Pad the axis ranges slightly beyond the data.
        ax1.set_xlim(years[0] - 2, predict_years[-1] + 2)
        y_min, y_max = ax1.get_ylim()
        ax1.set_ylim(y_min * 0.95, y_max * 1.05)

        # Vertical divider between historical and forecast regions.
        ax1.axvline(x=years[-1] + 0.5, color='#E67E22', linestyle='-', alpha=0.8, linewidth=3)
        ax1.text(years[-1] + 0.5, y_max * 0.98, '历史数据 | 预测数据', ha='center',
                fontsize=12, fontweight='bold',
                bbox=dict(boxstyle='round,pad=0.5', facecolor='#E67E22',
                         alpha=0.8, edgecolor='white'))

        # Summary box: data span and value range.
        info_text = f"历史数据: {years[0]:.0f}-{years[-1]:.0f}年 ({len(years)}个数据点)\n"
        info_text += f"人口范围: {population.min():.0f}-{population.max():.0f}万人"
        ax1.text(0.02, 0.98, info_text, transform=ax1.transAxes,
                fontsize=10, verticalalignment='top',
                bbox=dict(boxstyle='round,pad=0.5', facecolor='white',
                         alpha=0.9, edgecolor='gray'))

        # --- Lower panel: forecast table ---
        ax2.axis('tight')
        ax2.axis('off')

        if prediction_data:
            table_data = []
            model_labels = {
                'linear': '线性回归',
                'logistic': 'Logistic模型',
                'grey': '灰色预测GM(1,1)'
            }

            for model_name, pred_values in prediction_data.items():
                row = [model_labels.get(model_name, model_name)]
                row.extend([f"{val:.0f}" for val in pred_values])
                table_data.append(row)

            col_labels = ['模型/年份'] + [str(year) for year in predict_years]

            table = ax2.table(cellText=table_data,
                             colLabels=col_labels,
                             cellLoc='center',
                             loc='center',
                             colColours=['#3498DB'] + ['#E8F4FD'] * len(predict_years))

            table.auto_set_font_size(False)
            table.set_fontsize(13)
            table.scale(1.2, 3)

            # Style: bold white header, bold first column, zebra body.
            for i in range(len(table_data) + 1):
                for j in range(len(col_labels)):
                    cell = table[(i, j)]
                    if i == 0:  # header row
                        cell.set_text_props(weight='bold', color='white')
                        cell.set_facecolor('#3498DB')
                    elif j == 0:  # model-name column
                        cell.set_text_props(weight='bold')
                        cell.set_facecolor('#D5DBDB')
                    else:
                        cell.set_facecolor('#f8f9fa' if i % 2 == 0 else 'white')

            ax2.set_title(f'{predict_years[0]}-{predict_years[-1]}年人口预测数据表 (单位: 万人)',
                         fontsize=14, fontweight='bold', pad=15)

        plt.tight_layout()

        # Render ONCE into memory and reuse the bytes for both the file
        # and the base64 payload. The previous version rendered twice at
        # 300 dpi and emitted [DEBUG] print() lines — on an MCP stdio
        # server stdout carries the JSON-RPC frames, so those prints
        # could corrupt the protocol stream; they have been removed.
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        plt.close()
        png_bytes = buffer.getvalue()

        output_dir = Path(__file__).parent / "图片"
        output_dir.mkdir(parents=True, exist_ok=True)

        timestamp = pd.Timestamp.now().strftime("%Y%m%d_%H%M%S")
        filename = f"预测结果可视化图_{timestamp}.png"
        filepath = output_dir / filename
        filepath.write_bytes(png_bytes)

        image_base64 = base64.b64encode(png_bytes).decode()

        response = {
            "status": "success",
            "chart_type": "预测结果可视化图",
            "image_base64": image_base64,
            "description": "显示历史数据和各模型的预测结果，包含详细的预测数据表",
            "forecast_years": predict_years,
            "prediction_summary": prediction_data,
            "saved_file": str(filepath)
        }

        return json.dumps(response, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"预测图生成失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.tool()
def visualize_entropy_weights() -> str:
    """Generate a visualization of the entropy-weight allocation.

    Computes combination weights for the fitted models via
    ``predictor.entropy_weight_combination`` and renders a 7-panel
    figure: weight pie/bar charts, R² / MSE / composite-score bars,
    indicator-weight pie, and a summary table. The PNG is written to the
    local "图片" directory and also returned as base64 inside JSON.

    Returns:
        JSON string containing the base64 image, the entropy analysis
        summary, and the saved file path; an error payload on failure.
    """
    try:
        if not predictor.results:
            return json.dumps({
                "status": "error",
                "message": "没有可用的模型结果，请先进行预测"
            }, ensure_ascii=False)

        # Compute entropy-method weights. The result is expected to carry
        # 'weights' and 'performance_analysis' keys (used below) or an
        # 'error' key on failure.
        entropy_result = predictor.entropy_weight_combination(predictor.results)

        if 'error' in entropy_result:
            return json.dumps({
                "status": "error",
                "message": f"熵权法计算失败: {entropy_result['error']}"
            }, ensure_ascii=False)

        # Per-model weights and the supporting performance metrics.
        weights = entropy_result['weights']
        performance_analysis = entropy_result['performance_analysis']

        # Re-apply the shared plotting style (fonts / layout defaults).
        set_plot_style()
        fig = plt.figure(figsize=(20, 12))

        # 3x3 grid: two rows of charts plus a full-width bottom table.
        gs = fig.add_gridspec(3, 3, height_ratios=[2, 2, 1], width_ratios=[1, 1, 1],
                             hspace=0.3, wspace=0.3)

        # Shared palette and human-readable model labels.
        colors = ['#3498DB', '#E74C3C', '#27AE60', '#F39C12', '#9B59B6']
        model_labels = {
            'linear': '线性回归',
            'logistic': 'Logistic模型',
            'grey': '灰色预测GM(1,1)'
        }

        # 1. Weight-allocation pie chart.
        ax1 = fig.add_subplot(gs[0, 0])
        model_names = [model_labels.get(name, name) for name in weights.keys()]
        weight_values = list(weights.values())

        wedges, texts, autotexts = ax1.pie(weight_values, labels=model_names, 
                                          colors=colors[:len(weights)],
                                          autopct='%1.1f%%', startangle=90,
                                          explode=[0.05] * len(weights),
                                          shadow=True, textprops={'fontsize': 12})

        # Make the percentage labels bold white for contrast.
        for autotext in autotexts:
            autotext.set_color('white')
            autotext.set_fontweight('bold')
            autotext.set_fontsize(11)

        ax1.set_title('熵权法模型权重分配', fontsize=16, fontweight='bold', pad=20)

        # 2. Weight bar chart (same data as the pie, numeric view).
        ax2 = fig.add_subplot(gs[0, 1])
        bars = ax2.bar(model_names, weight_values, 
                      color=colors[:len(weights)], alpha=0.8,
                      edgecolor='white', linewidth=2)

        # Numeric labels above each bar.
        for bar, weight in zip(bars, weight_values):
            height = bar.get_height()
            ax2.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                    f'{weight:.4f}', ha='center', va='bottom', 
                    fontsize=11, fontweight='bold')

        ax2.set_title('模型权重数值对比', fontsize=16, fontweight='bold', pad=20)
        ax2.set_ylabel('权重值', fontsize=12, fontweight='bold')
        ax2.set_ylim(0, max(weight_values) * 1.2)
        ax2.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax2.set_facecolor('#f8f9fa')

        # 3. R² score comparison (iterated in the same key order as
        # `weights`, so bars line up with the weight charts).
        ax3 = fig.add_subplot(gs[0, 2])
        r2_scores = [performance_analysis['r2_scores'][name] for name in weights.keys()]
        r2_names = [model_labels.get(name, name) for name in weights.keys()]

        bars_r2 = ax3.bar(r2_names, r2_scores, 
                          color=colors[:len(weights)], alpha=0.8,
                          edgecolor='white', linewidth=2)

        for bar, r2 in zip(bars_r2, r2_scores):
            height = bar.get_height()
            ax3.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                    f'{r2:.4f}', ha='center', va='bottom', 
                    fontsize=11, fontweight='bold')

        ax3.set_title('模型R²得分', fontsize=16, fontweight='bold', pad=20)
        ax3.set_ylabel('R²得分', fontsize=12, fontweight='bold')
        ax3.set_ylim(0, 1.05)
        ax3.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax3.set_facecolor('#f8f9fa')

        # 4. MSE comparison.
        ax4 = fig.add_subplot(gs[1, 0])
        mse_values = [performance_analysis['mse_values'][name] for name in weights.keys()]

        bars_mse = ax4.bar(r2_names, mse_values, 
                          color=colors[:len(weights)], alpha=0.8,
                          edgecolor='white', linewidth=2)

        for bar, mse in zip(bars_mse, mse_values):
            height = bar.get_height()
            ax4.text(bar.get_x() + bar.get_width()/2., height,
                    f'{mse:.0f}', ha='center', va='bottom', 
                    fontsize=11, fontweight='bold')

        ax4.set_title('模型均方误差(MSE)', fontsize=16, fontweight='bold', pad=20)
        ax4.set_ylabel('MSE', fontsize=12, fontweight='bold')
        ax4.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax4.set_facecolor('#f8f9fa')

        # 5. Indicator-weight pie (how much R² vs MSE contribute).
        ax5 = fig.add_subplot(gs[1, 1])
        indicator_weights = performance_analysis['indicator_weights']
        indicator_names = ['R²权重', 'MSE权重']
        indicator_values = [indicator_weights['r2_weight'], indicator_weights['mse_weight']]

        wedges2, texts2, autotexts2 = ax5.pie(indicator_values, labels=indicator_names,
                                              colors=['#3498DB', '#E74C3C'],
                                              autopct='%1.1f%%', startangle=90,
                                              explode=[0.05, 0.05],
                                              shadow=True, textprops={'fontsize': 12})

        for autotext in autotexts2:
            autotext.set_color('white')
            autotext.set_fontweight('bold')
            autotext.set_fontsize(11)

        ax5.set_title('指标权重分配', fontsize=16, fontweight='bold', pad=20)

        # 6. Composite model scores.
        ax6 = fig.add_subplot(gs[1, 2])
        model_scores = [performance_analysis['model_scores'][name] for name in weights.keys()]

        bars_score = ax6.bar(r2_names, model_scores, 
                            color=colors[:len(weights)], alpha=0.8,
                            edgecolor='white', linewidth=2)

        for bar, score in zip(bars_score, model_scores):
            height = bar.get_height()
            ax6.text(bar.get_x() + bar.get_width()/2., height + max(model_scores) * 0.02,
                    f'{score:.4f}', ha='center', va='bottom', 
                    fontsize=11, fontweight='bold')

        ax6.set_title('模型综合得分', fontsize=16, fontweight='bold', pad=20)
        ax6.set_ylabel('综合得分', fontsize=12, fontweight='bold')
        ax6.grid(True, alpha=0.3, axis='y', linestyle='--')
        ax6.set_facecolor('#f8f9fa')

        # 7. Full-width detail table.
        ax7 = fig.add_subplot(gs[2, :])
        ax7.axis('tight')
        ax7.axis('off')

        # One row per model: weight, R², MSE, score, and a star on the
        # model with the highest weight.
        table_data = []
        for name in weights.keys():
            label = model_labels.get(name, name)
            weight = weights[name]
            r2 = performance_analysis['r2_scores'][name]
            mse = performance_analysis['mse_values'][name]
            score = performance_analysis['model_scores'][name]
            
            table_data.append([
                label,
                f"{weight:.4f}",
                f"{r2:.4f}",
                f"{mse:.0f}",
                f"{score:.4f}",
                "★" if weight == max(weight_values) else ""
            ])

        table = ax7.table(cellText=table_data,
                         colLabels=['模型名称', '熵权法权重', 'R²得分', 'MSE', '综合得分', '最优'],
                         cellLoc='center',
                         loc='center',
                         colColours=['#3498DB'] * 6)

        table.auto_set_font_size(False)
        table.set_fontsize(12)
        table.scale(1, 2.5)

        # Style the table: bold white header, zebra body, red star cell.
        for i in range(len(table_data) + 1):
            for j in range(6):
                cell = table[(i, j)]
                if i == 0:  # header row
                    cell.set_text_props(weight='bold', color='white')
                    cell.set_facecolor('#3498DB')
                else:
                    cell.set_facecolor('#f8f9fa' if i % 2 == 0 else 'white')
                    if j == 5 and table_data[i-1][j] == "★":  # "best" marker cell
                        cell.set_text_props(color='red', weight='bold')

        ax7.set_title('熵权法综合分析表', fontsize=16, fontweight='bold', pad=20)

        # Figure-level explanatory note (bottom-left corner).
        explanation = (
            "熵权法原理说明:\n"
            "1. 基于R²得分和MSE指标构建决策矩阵\n"
            "2. 计算各指标的信息熵，确定指标权重\n"
            "3. 综合评估各模型性能，自动分配权重\n"
            "4. 权重越高表示模型综合性能越好"
        )
        
        fig.text(0.02, 0.02, explanation, fontsize=10, 
                bbox=dict(boxstyle='round,pad=0.5', facecolor='#ECF0F1', 
                         alpha=0.9, edgecolor='gray'),
                verticalalignment='bottom')

        # Overall figure title.
        fig.suptitle('熵权法模型权重分配可视化分析', fontsize=20, fontweight='bold', y=0.98)

        # Persist the PNG next to this script, under "图片/".
        output_dir = Path(__file__).parent / "图片"
        output_dir.mkdir(parents=True, exist_ok=True)
        
        timestamp = pd.Timestamp.now().strftime("%Y%m%d_%H%M%S")
        filename = f"熵权法权重分析图_{timestamp}.png"
        filepath = output_dir / filename
        
        plt.savefig(filepath, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        
        # Second render into memory for the base64 payload.
        buffer = io.BytesIO()
        plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight',
                   facecolor='white', edgecolor='none')
        buffer.seek(0)
        image_base64 = base64.b64encode(buffer.getvalue()).decode()
        plt.close()
        
        response = {
            "status": "success",
            "chart_type": "熵权法权重分析图",
            "image_base64": image_base64,
            "description": "展示熵权法的权重计算过程和结果分析",
            "entropy_analysis": {
                "model_weights": weights,
                "indicator_weights": indicator_weights,
                "best_model": max(weights.keys(), key=lambda k: weights[k]),
                "weight_distribution": "权重分配基于模型的R²得分和MSE表现"
            },
            "saved_file": str(filepath)
        }
        
        return json.dumps(response, ensure_ascii=False, indent=2)
        
    except Exception as e:
        return json.dumps({
            "status": "error",
            "message": f"熵权法可视化失败: {str(e)}"
        }, ensure_ascii=False, indent=2)

@mcp.resource("population://data")
def get_sample_data() -> str:
    """Return a bundled sample population dataset as a JSON string.

    The payload contains eleven yearly records (2000-2010) suitable for
    feeding directly into the preprocessing tool.
    """
    # Populations for consecutive years starting at 2000 (unit: 10k people).
    populations = [
        126743, 127627, 128453, 129227, 129988, 130756,
        131448, 132129, 132802, 133450, 134091,
    ]
    sample_data = [
        {"year": 2000 + offset, "population": count}
        for offset, count in enumerate(populations)
    ]

    payload = {
        "description": "中国人口数据示例 (单位: 万人)",
        "data": sample_data,
        "usage": "可以直接将此数据传递给preprocess_population_data函数",
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)

if __name__ == "__main__":
    # Serve over stdio: stdout carries the MCP JSON-RPC frames, so
    # nothing else in this process should print to stdout while running.
    mcp.run(transport="stdio")