import datetime
import itertools
import json
import os
import re
import ssl
import uuid
import warnings
from pathlib import Path
from typing import Union, List, Dict, Any

import nltk
import numpy as np
import pandas as pd
import plotly.express as px
import statsmodels.stats.weightstats as sm_stats
from fastapi import FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
from scipy import stats
from scipy.stats import levene, tukey_hsd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

from mcp.server.fastmcp import FastMCP
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords

# ========================
# Configuration and constants
# ========================
ALPHA = 0.05  # default significance level for all tests
MAX_SAMPLE_SIZE = 10000  # upper bound on per-group sample size
PLOT_DIR = Path(__file__).parent / 'plots'  # where generated HTML plots are stored/served
INTENT_LABELS = ['t_test', 'chi_square', 'chi_square_independence', 'anova', 'describe']

# Intent-recognition training data: (query text, intent label) pairs
TRAIN_DATA = [
    ('比较两组数据差异', 't_test'),
    ('两组样本是否有显著差异', 't_test'),
    ('A组和B组有区别吗', 't_test'),
    ('检验两组均值差异', 't_test'),
    ('卡方检验', 'chi_square'),
    ('类别数据关联性分析', 'chi_square'),
    ('列联表分析', 'chi_square'),
    ('分类变量相关性', 'chi_square'),
    ('方差分析', 'anova'),
    ('多组数据比较', 'anova'),
    ('三种广告效果差异分析', 'chi_square_independence'),
    ('广告组购买率比较', 'chi_square_independence'),
    ('列联表独立性检验', 'chi_square_independence'),
    ('多组分类数据比较', 'chi_square_independence'),
]

# ========================
# NLTK resource download (disabled)
# ========================
# NOTE(review): disabled, presumably because of SSL/network problems in the
# deployment environment (the original error handling below suggests so) --
# confirm before re-enabling.
# try:
#     _create_unverified_https_context = ssl._create_unverified_context
# except AttributeError:
#     pass
# else:
#     ssl._create_default_https_context = _create_unverified_https_context
# try:
#     nltk.download('punkt', quiet=True)
#     nltk.download('stopwords', quiet=True)
# except ssl.SSLError as e:
#     print(f"NLTK resource download SSL error: {e}")
#     print("Consider disabling the VPN or using a domestic mirror")
# except Exception as e:
#     print(f"NLTK resource download failed: {e}")
#     print("Install manually: python -m nltk.downloader punkt stopwords")

# ========================
# Intent-recognition model initialization (disabled)
# ========================
# NOTE(review): INTENT_MODEL and its training are commented out below, so at
# import time nothing fits VECTORIZER and no module-level INTENT_MODEL exists;
# code that references a module-level INTENT_MODEL will raise NameError.
VECTORIZER = TfidfVectorizer()
# INTENT_MODEL = LogisticRegression()

# # Train the intent-recognition model
# def train_intent_model() -> None:
#     """Train the intent classifier.
#
#     Vectorizes the training texts with TF-IDF and fits a
#     logistic-regression classifier.
#     """
#     texts, labels = zip(*TRAIN_DATA)
#     X = VECTORIZER.fit_transform(texts)
#     INTENT_MODEL.fit(X, labels)

# # Train the model at import time
# train_intent_model()

# ========================
# 数据提取函数
# ========================
def extract_data(query: str) -> Union[List, Dict[str, List[float]]]:
    """Extract structured data from a natural-language query.

    Recognized formats, tried in order:
      1. Ad-group counts and purchase rates ("电视广告组200人购买率25%"),
         returned as a 2xN contingency table [purchased, not_purchased].
      2. Per-group summary stats ("第一组10人，平均分80分，标准差8分"), from
         which normally-distributed samples are simulated.
      3. Shorthand groups ("第一组 80 分左右 10 个人"), simulated with a
         default standard deviation of 5.
      4. Explicit per-group values ("A组[1,2,3]" or "A组: 1,2,3").
      5. Fallback: all numbers in the query split evenly across the group
         labels found (or one default group).

    Args:
        query: The user's natural-language query.

    Returns:
        A nested list (contingency table) for format 1, otherwise a dict
        mapping group name -> list of float samples.
    """
    # All numbers in the query (decimals and negatives included).
    extracted_numbers = list(map(float, re.findall(r'-?\b\d+\.?\d*\b', query)))

    # Format 1: ad-group sample size and purchase rate.
    ad_pattern = r'([^，,；;]+广告组)(\d+)人购买率(\d+)%'
    ad_matches = re.findall(ad_pattern, query)

    if ad_matches:
        purchased = []
        not_purchased = []
        for group_name, sample_size, rate in ad_matches:
            n = int(sample_size)
            p = int(rate) / 100
            buy = round(n * p)
            purchased.append(buy)
            not_purchased.append(n - buy)
        # 2xN contingency table: rows are purchased / not purchased,
        # columns are the ad groups.
        return [purchased, not_purchased]

    # Format 2: sample size, mean, and standard deviation per group.
    stats_pattern = r'([^，,；;]+组)(\d+)人，平均分(\d+)分，标准差(\d+)分'
    stats_matches = re.findall(stats_pattern, query)

    if stats_matches:
        # BUG FIX: seed once, BEFORE the loop. The original re-seeded per
        # group, so every group drew identical z-scores and the simulated
        # groups were perfectly correlated (degenerate downstream tests).
        np.random.seed(42)  # fixed seed keeps results reproducible
        group_data = {}
        for group_name, sample_size, mean, std in stats_matches:
            n = int(sample_size)
            mu = float(mean)
            sigma = float(std)
            group_data[group_name.strip()] = np.random.normal(mu, sigma, n).tolist()
        return group_data

    # Format 3: "X分左右Y个人" shorthand (multiple groups supported).
    simple_pattern = r'(第[一二三四五六七八九十]+组)\s*(\d+)\s*分左右\s*(\d+)\s*个人'
    simple_matches = re.findall(simple_pattern, query)

    if simple_matches:
        np.random.seed(42)  # seed once (see BUG FIX note above)
        group_data = {}
        for group_name, mean, sample_size in simple_matches:
            n = int(sample_size)
            mu = float(mean)
            sigma = 5.0  # default standard deviation for the shorthand format
            group_data[group_name.strip()] = np.random.normal(mu, sigma, n).tolist()
        return group_data

    # Format 4: explicit group data, bracketed or colon-separated.
    group_data = {}
    patterns = [
        r'([A-Za-z]组|组[一二三四五六七八九])[\[\(]([\d,\.\s-]+)[\]\)]',
        r'([A-Za-z]组|组[一二三四五六七八九])[:：]([\d,\.\s-]+)'
    ]
    for pattern in patterns:
        for group_name, data_str in re.findall(pattern, query):
            group_data[group_name] = list(map(float, re.findall(r'-?\b\d+\.?\d*\b', data_str)))

    # Fallback: split the loose numbers across whatever group labels appear.
    if not group_data:
        group_labels = re.findall(r'[ABCDEFG]组|组[一二三四五六七八九]', query) or ['组1', '组2']
        group_count = len(group_labels)
        if group_count > 0 and len(extracted_numbers) > 0:
            base = len(extracted_numbers) // group_count
            remainder = len(extracted_numbers) % group_count
            for i, group in enumerate(group_labels):
                start = i * base + min(i, remainder)
                end = start + base + (1 if i < remainder else 0)
                group_data[group] = extracted_numbers[start:end]
        else:
            group_data['默认组'] = extracted_numbers

    return group_data

# ========================
# 自然语言响应生成
# ========================
def generate_response(result: dict, intent: str) -> str:
    """Render a statistical-analysis result as a natural-language reply.

    Args:
        result: Result dict produced by one of the analysis tools (or an
            error dict with an 'error' key).
        intent: The analysis intent ('t_test', 'chi_square',
            'chi_square_independence', 'anova', 'describe').

    Returns:
        A formatted natural-language response string.
    """
    if 'error' in result:
        return f"分析出错: {result['error']} (检测到{result.get('groups_detected', 0)}组数据)"

    if intent == 't_test':
        return (
            f"独立样本t检验结果显示: t值={result['statistic']:.4f}, p值={result['p_value']:.6f}, "
            f"{'' if result['significant'] else '不'}具有统计学显著性差异 (α={ALPHA})。\n"
            f"方法: {result['method']}\n"
            f"可视化结果: {result['visualization']}"
        )
    elif intent == 'chi_square':
        # Plain literal: no placeholders, so no f-prefix needed.
        correction_note = "(应用Yates连续性校正)" if result['correction_applied'] else ""
        return (
            f"卡方检验结果显示: χ²值={result['chi2_stat']:.4f}, p值={result['p_value']:.6f}, {correction_note} "
            f"{'' if result['significant'] else '不'}具有统计学显著性关联 (α={ALPHA})。\n"
            f"可视化结果: {result['visualization']}"
        )
    elif intent == 'chi_square_independence':
        return (
            f"卡方独立性检验结果显示: χ²值={result['chi2_stat']:.4f}, p值={result['p_value']:.6f}, 自由度={result['degrees_of_freedom']}, "
            f"{'' if result['significant'] else '不'}具有统计学显著性差异 (α={ALPHA})。\n"
            f"可视化结果: {result['visualization']}"
        )
    elif intent == 'anova':
        homogeneity_result = "通过" if result['homogeneity'] else "未通过"
        means_str = ', '.join([f'{k}均值={v:.2f}' for k, v in result['group_means'].items()])

        # Build the post-hoc (Tukey HSD) section, if any.
        post_hoc_text = ""
        if result.get('post_hoc') and result['post_hoc']['comparisons']:
            comparisons = []
            for comp in result['post_hoc']['comparisons']:
                sig = '具有' if comp['significant'] else '不具有'
                comparisons.append(f"{comp['groups']}: 均值差异={comp['mean_difference']:.4f}, 调整后p值={comp['p_adjusted']:.6f}, {sig}统计学显著性")

            significant_pairs = result['post_hoc']['significant_pairs']
            if significant_pairs:
                post_hoc_text = f"\n事后检验(Tukey HSD)发现以下组别间存在显著差异: {', '.join(significant_pairs)}\n详细比较: {'; '.join(comparisons)}"
            else:
                post_hoc_text = f"\n事后检验(Tukey HSD)未发现显著差异的组别对。\n详细比较: {'; '.join(comparisons)}"
        elif result['significant']:
            post_hoc_text = "\n由于ANOVA结果显著但事后检验失败，无法确定具体差异组别。"

        return (
            f"方差同质性检验(Levene's test)结果: W值={result['levene_statistic']:.4f}, p值={result['levene_p_value']:.6f}, "
            f"{homogeneity_result}方差同质性假设。\n"
            f"方差分析结果显示: F值={result['f_statistic']:.4f}, p值={result['p_value']:.6f}, "
            f"{'' if result['significant'] else '不'}具有统计学显著性差异 (α={ALPHA})。\n"
            f"各组均值: {means_str}{post_hoc_text}\n"
            f"可视化结果: {result['visualization']}"
        )
    elif intent == 'describe':
        # Loop variable named 'st' (not 'stats') to avoid shadowing the
        # scipy.stats module alias imported at module level.
        desc_str = '\n'.join(
            f'{group}: 样本量={st["count"]}, 均值={st["mean"]:.2f}, 标准差={st["std"]:.2f}, '
            f'最小值={st["min"]:.2f}, 最大值={st["max"]:.2f}'
            for group, st in result.items()
        )
        return f"描述统计结果:\n{desc_str}"
    else:
        return f"不支持的分析类型: {intent}"

# ========================
# 意图识别与分析执行
# ========================
def _recognize_intent(query: str) -> str:
    """Recognize the analysis intent of a user query.

    BUG FIX: the original referenced a module-level INTENT_MODEL whose
    definition and training are commented out, so every call raised NameError
    (and VECTORIZER was never fitted). This version lazily trains a TF-IDF +
    logistic-regression classifier on TRAIN_DATA at first use and caches it
    on the function object so training happens only once.

    Args:
        query: The user's natural-language query.

    Returns:
        One of the intent labels, e.g. 't_test' or 'anova'.
    """
    model = getattr(_recognize_intent, '_model', None)
    if model is None:
        texts, labels = zip(*TRAIN_DATA)
        # Fit the shared module-level vectorizer alongside the classifier.
        features = VECTORIZER.fit_transform(texts)
        model = LogisticRegression()
        model.fit(features, labels)
        _recognize_intent._model = model
    return model.predict(VECTORIZER.transform([query]))[0]


def _perform_analysis(data: Union[List, Dict[str, List[float]]], intent: str, alpha: float) -> dict:
    """根据意图执行相应的统计分析

    Args:
        data: 提取的结构化数据
        intent: 识别出的分析意图
        alpha: 显著性水平

    Returns:
        统计分析结果字典
    """
    result = {}
    if isinstance(data, dict):
        groups = list(data.values())
        group_names = list(data.keys())
    else:
        groups = data
        group_names = [f'组{i+1}' for i in range(len(groups))]
    
    if intent == 't_test':
        if len(groups) >= 2:
            sample1, sample2 = groups[0], groups[1]
            # 确保两组样本长度一致
            min_length = min(len(sample1), len(sample2))
            sample1 = sample1[:min_length]
            sample2 = sample2[:min_length]
            result = t_test(sample1, sample2, alpha)
        else:
            result = {'error': 't检验需要至少两组数据', 'groups_detected': len(groups)}
    
    elif intent == 'chi_square':
        if len(groups) >= 2 and all(len(g) == 2 for g in groups):
            observed = [g[0] for g in groups]
            expected = [g[1] for g in groups]
            result = chi_square(observed, expected, alpha)
        else:
            result = {'error': '卡方检验需要至少两组二值数据', 'groups_detected': len(groups)}
    
    elif intent == 'chi_square_independence':
        if isinstance(data, list) and len(data) >= 2 and all(len(row) >= 2 for row in data):
            result = chi_square_independence(data, alpha)
        else:
            result = {'error': '卡方独立性检验需要至少2x2的列联表数据', 'groups_detected': len(groups) if isinstance(groups, list) else 0}
    
    elif intent == 'anova':
        if len(groups) >= 2:
            result = anova(groups, alpha)
        else:
            result = {'error': '方差分析需要至少两组数据', 'groups_detected': len(groups)}
    
    elif intent == 'describe':
        result = {}
        for name, group_data in zip(group_names, groups):
            result[name] = {
                'count': len(group_data),
                'mean': round(np.mean(group_data), 2),
                'std': round(np.std(group_data), 2),
                'min': round(np.min(group_data), 2),
                'max': round(np.max(group_data), 2)
            }
    
    else:
        result = {'message': f'暂不支持{intent}类型分析'}
    
    return result

# ========================
# FastAPI application and service configuration
# ========================
# Create the FastAPI application.
app = FastAPI()

# Serve generated plot files as static content.
# BUG FIX: the original mounted '/plots' twice (once here and once after the
# config load), registering duplicate routes under the same name; mount once.
PLOT_DIR.mkdir(exist_ok=True)
app.mount("/plots", StaticFiles(directory=str(PLOT_DIR)), name="plots")

# Load configuration from config.json next to this file.
try:
    config_path = os.path.join(os.path.dirname(__file__), 'config.json')
    with open(config_path, 'r') as f:
        config = json.load(f)
except FileNotFoundError:
    raise RuntimeError("配置文件 config.json 未找到，请确保文件存在于项目根目录")
except json.JSONDecodeError:
    raise RuntimeError("配置文件格式错误，请检查 JSON 语法")

# Override the defaults from the server's configured environment, if present.
MAX_SAMPLE_SIZE = int(config['mcpServers']['stats-server']['env'].get('MAX_SAMPLE_SIZE', MAX_SAMPLE_SIZE))
ALPHA = float(config['mcpServers']['stats-server']['env'].get('ALPHA', ALPHA))

# Initialize the MCP service on top of the FastAPI app.
mcp = FastMCP('StatsService', app=app)

def parse_uploaded_file(file_path: str) -> pd.DataFrame:
    """Parse an uploaded Excel or CSV file into a DataFrame.

    Args:
        file_path: Path to the uploaded file.

    Returns:
        The parsed DataFrame.

    Raises:
        ValueError: If the file extension is not csv/xlsx/xls.
    """
    # Path.suffix isolates the real extension; the original split the whole
    # path on '.', which misbehaves for dotted directory names or
    # extension-less files.
    ext = Path(file_path).suffix.lower().lstrip('.')
    if ext == 'csv':
        return pd.read_csv(file_path)
    if ext in ('xlsx', 'xls'):
        return pd.read_excel(file_path)
    raise ValueError(f"不支持的文件格式: {ext}，仅支持CSV和Excel文件")


@app.post("/analyze_file")
async def analyze_file(
    file: UploadFile = File(...),
    query: str = ""
):
    """Upload a data file and run a statistical analysis on it.

    Args:
        file: Uploaded Excel or CSV file.
        query: The user's natural-language query describing the analysis.

    Returns:
        Dict with the natural-language response, or an error message.
    """
    # BUG FIX: the original referenced an undefined module-level UPLOAD_DIR
    # (NameError on every request). Create the upload directory here instead.
    upload_dir = Path(__file__).parent / 'uploads'
    upload_dir.mkdir(exist_ok=True)

    # Save the upload under a unique name to avoid collisions.
    file_id = str(uuid.uuid4())
    file_path = upload_dir / f"{file_id}_{file.filename}"
    try:
        with open(file_path, "wb") as f:
            f.write(await file.read())

        # Parse the file and turn each column into a sample list.
        df = parse_uploaded_file(str(file_path))
        data = {col: df[col].dropna().tolist() for col in df.columns}

        # Recognize the intent, run the analysis, and phrase the result.
        intent = _recognize_intent(query)
        result = _perform_analysis(data, intent, ALPHA)
        response = generate_response(result, intent)
        return {"response": response}
    except Exception as e:
        # Surface the failure to the client instead of a bare 500.
        return {"error": f"分析失败: {str(e)}"}
    finally:
        # Always remove the temporary upload.
        if os.path.exists(file_path):
            os.remove(file_path)


# ========================
# 统计分析工具函数
# ========================
@mcp.tool()
def t_test(sample1: Union[List[float], np.ndarray], sample2: Union[List[float], np.ndarray], alpha: float = ALPHA) -> dict:
    """Run an independent-samples t-test.

    Args:
        sample1: First group of samples.
        sample2: Second group of samples.
        alpha: Significance level, defaults to ALPHA.

    Returns:
        Dict with the t statistic, p-value, significance flag, method used
        (Student's or Welch's) and a link to the generated box plot.

    Raises:
        ValueError: If either sample exceeds MAX_SAMPLE_SIZE.
    """
    if len(sample1) > MAX_SAMPLE_SIZE or len(sample2) > MAX_SAMPLE_SIZE:
        raise ValueError(f"样本量不能超过{MAX_SAMPLE_SIZE}")

    # Truncate to a common length when the samples differ in size.
    min_length = min(len(sample1), len(sample2))
    if len(sample1) != len(sample2):
        sample1 = sample1[:min_length]
        sample2 = sample2[:min_length]
        warnings.warn(f"样本长度不一致，已截断为最短样本长度: {min_length}")

    # Levene's test decides between Student's (equal variances)
    # and Welch's t-test.
    _, p_var = stats.levene(sample1, sample2)
    equal_var = p_var > alpha

    t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=equal_var)

    # Generate the box-plot visualization (debug-log scaffolding removed;
    # write_html raises on failure, which propagates to the caller anyway).
    PLOT_DIR.mkdir(exist_ok=True)
    fig = px.box(
        y=[sample1, sample2],
        labels={'value': '数值', 'variable': '组别'},
        title=f't检验结果 (p={p_val:.4f})'
    )
    plot_filename = f't_test_{uuid.uuid4().hex[:10]}.html'
    plot_path = PLOT_DIR / plot_filename
    fig.write_html(plot_path)

    # Use the configured port so the link matches the other tools
    # (the original hard-coded 8036 here).
    port = config["mcpServers"]["stats-server"]["port"]
    visualization_url = f'http://localhost:{port}/plots/{plot_filename}'

    return {
        "statistic": round(float(t_stat), 4),
        "p_value": round(float(p_val), 6),
        "significant": bool(p_val < alpha),
        "method": "Welch's t-test" if not equal_var else "Student's t-test",
        "visualization": visualization_url,
        # str() keeps the result JSON-serializable (Path objects are not).
        "plot_path": str(plot_path)
    }


@mcp.tool()
def anova(groups: List[Union[List[float], np.ndarray]], alpha: float = ALPHA) -> dict:
    """Run a one-way ANOVA with Levene's check and Tukey HSD post-hoc.

    Args:
        groups: List of sample groups.
        alpha: Significance level, defaults to ALPHA.

    Returns:
        Dict with the F statistic, p-value, Levene homogeneity result, group
        means, optional Tukey HSD post-hoc comparisons and a plot link.

    Raises:
        ValueError: If fewer than two groups are given or any group is empty.
    """
    if len(groups) < 2:
        raise ValueError("方差分析至少需要两组数据")
    if any(len(group) == 0 for group in groups):
        raise ValueError("每组数据不能为空")

    # Homogeneity of variances, then the ANOVA itself.
    stat_levene, p_levene = levene(*groups)
    f_stat, p_val = stats.f_oneway(*groups)

    group_labels = [f'组{i+1}' for i in range(len(groups))]

    # Tukey HSD post-hoc, only when the ANOVA is significant.
    # BUG FIX: scipy.stats.tukey_hsd takes the sample groups themselves
    # (tukey_hsd(*groups)), not a flattened (data, labels) pair -- that is the
    # statsmodels signature -- and its result exposes pairwise matrices
    # `statistic` (mean differences) and `pvalue`, not `.meandiffs`.
    post_hoc = None
    if p_val < alpha:
        tukey_result = tukey_hsd(*groups)
        post_hoc = {"comparisons": [], "significant_pairs": []}
        for i, j in itertools.combinations(range(len(groups)), 2):
            diff = round(float(tukey_result.statistic[i][j]), 4)
            p_adj = round(float(tukey_result.pvalue[i][j]), 6)
            significant = p_adj < alpha
            pair = f"{group_labels[i]}-{group_labels[j]}"
            post_hoc["comparisons"].append({
                "groups": pair,
                "mean_difference": diff,
                "p_adjusted": p_adj,
                "significant": significant
            })
            if significant:
                post_hoc["significant_pairs"].append(pair)

    # Box plot saved under PLOT_DIR so the static mount can serve it (the
    # original wrote to a CWD-relative 'plots/' path).
    fig = px.box(
        y=groups,
        labels={'value': '数值', 'variable': '组别'},
        title=f'方差分析结果 (F={f_stat:.4f}, p={p_val:.4f})'
    )
    PLOT_DIR.mkdir(exist_ok=True)
    plot_filename = f'anova_{uuid.uuid4().hex[:10]}.html'
    fig.write_html(PLOT_DIR / plot_filename)
    port = config["mcpServers"]["stats-server"]["port"]
    visualization_url = f'http://localhost:{port}/plots/{plot_filename}'

    means = [np.mean(group) for group in groups]

    return {
        "f_statistic": round(float(f_stat), 4),
        "p_value": round(float(p_val), 6),
        "significant": bool(p_val < alpha),
        "levene_statistic": round(float(stat_levene), 4),
        "levene_p_value": round(float(p_levene), 6),
        "homogeneity": bool(p_levene >= alpha),
        "group_means": {label: round(float(mean), 2) for label, mean in zip(group_labels, means)},
        "post_hoc": post_hoc,
        "visualization": visualization_url
    }


@mcp.tool()
def chi_square(observed: Union[List[int], np.ndarray], expected: Union[List[int], np.ndarray], alpha: float = ALPHA) -> dict:
    """Run a chi-square goodness-of-fit test.

    Args:
        observed: Observed frequencies.
        expected: Expected frequencies.
        alpha: Significance level, defaults to ALPHA.

    Returns:
        Dict with the chi-square statistic, p-value, whether Yates'
        correction was applied, significance flag and a plot link.

    Raises:
        ValueError: On mismatched lengths, negative frequencies, or too many
            categories.
    """
    # list() so validation works for both lists and numpy arrays (for arrays,
    # `observed + expected` would add element-wise instead of concatenating).
    obs = list(observed)
    exp = list(expected)
    if len(obs) != len(exp):
        raise ValueError("观察值和期望值长度必须一致")
    if any(v < 0 for v in obs) or any(v < 0 for v in exp):
        raise ValueError("观察值和期望值不能为负数")
    if len(obs) > MAX_SAMPLE_SIZE:
        raise ValueError(f"类别数量不能超过{MAX_SAMPLE_SIZE}")

    # BUG FIX: scipy.stats.chisquare has no `correction` keyword (that belongs
    # to chi2_contingency), so the original raised TypeError on every call.
    # Apply Yates' continuity correction manually for small category counts,
    # preserving the original intent.
    # NOTE(review): Yates' correction is conventionally used only for df=1;
    # the <30-categories threshold is inherited from the original -- confirm.
    correction = len(obs) < 30
    if correction:
        chi2 = sum((abs(o - e) - 0.5) ** 2 / e for o, e in zip(obs, exp))
        p = stats.chi2.sf(chi2, df=len(obs) - 1)
    else:
        chi2, p = stats.chisquare(f_obs=obs, f_exp=exp)

    # Observed-vs-expected comparison chart, saved under PLOT_DIR so the
    # static mount can serve it.
    fig = px.bar(
        x=[f'类别{i+1}' for i in range(len(obs))],
        y=[obs, exp],
        barmode='group',
        labels={'value': '频数', 'variable': '类型'},
        title=f'卡方检验结果 (χ²={chi2:.4f}, p={p:.4f})'
    )
    PLOT_DIR.mkdir(exist_ok=True)
    plot_filename = f'chi_square_{uuid.uuid4().hex[:10]}.html'
    fig.write_html(PLOT_DIR / plot_filename)
    port = config["mcpServers"]["stats-server"]["port"]
    visualization_url = f'http://localhost:{port}/plots/{plot_filename}'

    return {
        "chi2_stat": round(float(chi2), 4),
        "p_value": round(float(p), 6),
        "correction_applied": correction,
        "significant": bool(p < alpha),
        "visualization": visualization_url
    }


@mcp.tool()
def chi_square_independence(observed: Union[List[List[int]], np.ndarray], alpha: float = ALPHA) -> dict:
    """Run a chi-square test of independence on a contingency table.

    The original version carried ~60 lines of unreachable code (and its
    docstring) after the return statement; that dead code is removed here.

    Args:
        observed: Contingency table of observed frequencies, at least 2x2.
        alpha: Significance level, defaults to ALPHA.

    Returns:
        Dict with the chi-square statistic, p-value, degrees of freedom,
        significance flag (a real bool), expected frequencies and a plot link.

    Raises:
        ValueError: If the table is smaller than 2x2 or contains negatives.
    """
    # Third-party plotting import kept local, as in the original.
    import plotly.graph_objects as go

    table = [list(row) for row in observed]
    if len(table) < 2 or any(len(row) < 2 for row in table):
        raise ValueError("列联表必须至少为2x2矩阵")
    if any(v < 0 for row in table for v in row):
        raise ValueError("观察频数不能为负数")

    # Orient a 2x3 table with ad types as rows (extract_data's ad format
    # produces a 2xN purchased/not-purchased table). Transposing does not
    # change the test result, only the chart labeling.
    if len(table) == 2 and len(table[0]) == 3:
        table = [list(col) for col in zip(*table)]

    chi2, p, dof, expected = stats.chi2_contingency(table)

    # Row totals for the observed-vs-expected comparison chart.
    observed_sum = [sum(row) for row in table]
    expected_sum = [float(sum(row)) for row in expected]
    # The hard-coded ad-type labels only fit a 3-row table; fall back to
    # generic group names otherwise.
    if len(table) == 3:
        categories = ['电视广告', '网络广告', '户外广告']
    else:
        categories = [f'组{i+1}' for i in range(len(table))]

    fig = go.Figure()
    fig.add_trace(go.Bar(x=categories, y=observed_sum, name='观察频数', marker_color='royalblue'))
    fig.add_trace(go.Bar(x=categories, y=expected_sum, name='期望频数', marker_color='lightcoral'))
    fig.update_layout(
        title='卡方独立性检验: 观察频数 vs 期望频数',
        xaxis_title='广告类型',
        yaxis_title='频数',
        barmode='group',
        legend_title='频数类型',
        width=800,
        height=500
    )

    # Save under PLOT_DIR with a unique name and build the URL from the
    # configured port (the original hard-coded 8036).
    PLOT_DIR.mkdir(exist_ok=True)
    plot_filename = f'chi_square_ind_{uuid.uuid4().hex[:10]}.html'
    fig.write_html(PLOT_DIR / plot_filename)
    port = config["mcpServers"]["stats-server"]["port"]

    return {
        'chi2_stat': round(float(chi2), 4),
        'p_value': round(float(p), 6),
        'degrees_of_freedom': int(dof),
        # BUG FIX: the original returned str(significant); the truthy string
        # 'False' made generate_response report non-significant results as
        # significant. Return a real bool.
        'significant': bool(p < alpha),
        'expected_frequencies': expected.tolist(),
        'visualization': f'http://localhost:{port}/plots/{plot_filename}'
    }


@mcp.tool()
def natural_language_analysis(query: str, alpha: float = ALPHA) -> dict:
    """End-to-end natural-language statistical analysis.

    Recognizes the intent of the query, extracts structured data from it,
    runs the matching statistical test, and phrases the outcome.

    Args:
        query: The user's natural-language query.
        alpha: Significance level, defaults to ALPHA.

    Returns:
        Dict with the recognized intent, the original query, the raw
        statistical result and a natural-language summary.
    """
    intent = _recognize_intent(query)
    stats_result = _perform_analysis(extract_data(query), intent, alpha)
    return {
        'intent': intent,
        'original_query': query,
        'statistical_result': stats_result,
        'natural_language_response': generate_response(stats_result, intent),
    }


# ========================
# Service entry point
# ========================
if __name__ == '__main__':
    # Start the MCP server, which hosts the registered tools and FastAPI app.
    mcp.run()