from mcp.server.fastmcp import FastMCP
from typing import List, Dict, Literal, Union
import numpy as np
from scipy import stats
from scipy.stats import norm, poisson, binom, chi2_contingency
import pandas as pd
import matplotlib.pyplot as plt
import logging
import os

# --- Logging configuration ---
# Logs are written next to this file under ./logs (created if missing).
log_dir = os.path.join(os.path.dirname(__file__), 'logs')
os.makedirs(log_dir, exist_ok=True)
logging.basicConfig(
    filename=os.path.join(log_dir, 'mcp_integrated.log'),
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    encoding='utf-8'  # NOTE: the `encoding` kwarg requires Python 3.9+
)

logger = logging.getLogger(__name__)

# Single MCP server instance; every @mcp.tool() below registers on it.
mcp = FastMCP("Integrated Statistics Service")

# 1. 描述性统计模块
@mcp.tool()
def calculate_mean(data: List[float]) -> Dict:
    """Return sample size, max, min and arithmetic mean of `data`.

    Raises:
        ValueError: if `data` is empty (the mean is undefined and the
            original code crashed with ZeroDivisionError).
    """
    if not data:
        raise ValueError("数据不能为空")
    return {
        "样本量": len(data),
        "最大值": float(max(data)),
        "最小值": float(min(data)),
        "平均值": float(sum(data) / len(data))
    }

@mcp.tool()
def calculate_median(data: List[float]) -> Dict:
    """Return sample size, max, min and median of `data`.

    Even-length samples use the mean of the two middle values.

    Raises:
        ValueError: if `data` is empty (the original code crashed with
            IndexError on `sorted_data[mid]`).
    """
    if not data:
        raise ValueError("数据不能为空")
    sorted_data = sorted(data)
    n = len(sorted_data)
    mid = n // 2
    median = (sorted_data[mid - 1] + sorted_data[mid]) / 2 if n % 2 == 0 else sorted_data[mid]
    return {
        "样本量": n,
        "最大值": float(max(data)),
        "最小值": float(min(data)),
        "中位数": float(median)
    }

@mcp.tool()
def calculate_variance(data: List[float]) -> Dict:
    """Return sample size, max, min, variance and standard deviation.

    Uses numpy's default ddof=0, i.e. the *population* variance/std.
    NOTE(review): if the *sample* (ddof=1) statistic was intended,
    pass ddof=1 — confirm against callers' expectations.

    Raises:
        ValueError: if `data` is empty (np.var on an empty array yields
            NaN plus a RuntimeWarning).
    """
    if not data:
        raise ValueError("数据不能为空")
    data_array = np.array(data)
    variance = np.var(data_array)
    std_dev = np.std(data_array)
    return {
        "样本量": len(data),
        "最大值": float(np.max(data_array)),
        "最小值": float(np.min(data_array)),
        "方差": float(variance),
        "标准差": float(std_dev)
    }

@mcp.tool()
def calculate_skewness(data: List[float]) -> Dict:
    """Return sample size, max, min and skewness of `data`.

    Raises:
        ValueError: if `data` is empty (stats.skew would return NaN).
    """
    if not data:
        raise ValueError("数据不能为空")
    data_array = np.array(data)
    # NOTE: for constant data scipy returns NaN skewness — surfaced as-is.
    skewness = stats.skew(data_array)
    return {
        "样本量": len(data),
        "最大值": float(np.max(data_array)),
        "最小值": float(np.min(data_array)),
        "偏度": float(skewness)
    }

@mcp.tool()
def calculate_kurtosis(data: List[float]) -> Dict:
    """Return sample size, max, min and (excess, Fisher) kurtosis of `data`.

    Raises:
        ValueError: if `data` is empty (stats.kurtosis would return NaN).
    """
    if not data:
        raise ValueError("数据不能为空")
    data_array = np.array(data)
    # scipy default is Fisher's definition: normal distribution -> 0.0
    kurtosis = stats.kurtosis(data_array)
    return {
        "样本量": len(data),
        "最大值": float(np.max(data_array)),
        "最小值": float(np.min(data_array)),
        "峰度": float(kurtosis)
    }

# File-analysis tools
@mcp.tool()
def analyze_csv(file_path: str, column: str) -> dict:
    """Run the descriptive-statistics suite on one column of a CSV file.

    Tries UTF-8 plus common Chinese encodings before giving up. Missing
    cells are dropped and non-numeric data rejected, mirroring the
    validation already performed by analyze_xlsx (the original skipped
    these checks here, so NaN/text columns crashed downstream tools).

    Raises:
        ValueError: missing arguments, unreadable/undecodable file,
            unknown column, or empty column after dropping NaNs.
        TypeError: the column contains non-numeric values.
    """
    if not file_path:
        raise ValueError("请提供CSV文件路径")
    if not column:
        raise ValueError("请提供要分析的列名")

    # Try common encodings until one decodes successfully.
    encodings = ['utf-8', 'gb18030', 'gbk', 'utf-16']
    df = None
    for encoding in encodings:
        try:
            df = pd.read_csv(file_path, encoding=encoding)
            break
        except UnicodeDecodeError:
            continue
        except FileNotFoundError:
            # Consistent with analyze_xlsx: report missing files as ValueError.
            raise ValueError(f"文件 '{file_path}' 不存在")

    if df is None:
        raise ValueError("无法解码CSV文件，请检查文件编码格式")

    if column not in df.columns:
        raise ValueError(f"列 '{column}' 不存在于CSV文件中")

    # Same data hygiene as analyze_xlsx: drop NaNs, require numeric values.
    data = df[column].dropna().tolist()
    if not data:
        raise ValueError("提取的数据为空，请检查文件内容")
    if not all(isinstance(x, (int, float)) for x in data):
        raise TypeError("数据包含非数值类型，请确保所选列只包含数字")

    variance_stats = calculate_variance(data)
    return {
        "mean": calculate_mean(data),
        "median": calculate_median(data),
        "variance": variance_stats["方差"],
        "std_dev": variance_stats["标准差"],
        "skewness": calculate_skewness(data),
        "kurtosis": calculate_kurtosis(data)
    }

@mcp.tool()
def analyze_xlsx(file_path: str, sheet_name: str, column: str) -> dict:
    """Run the descriptive-statistics suite on one column of an XLSX sheet.

    The try block is kept narrow (file read only): the original wrapped
    its own validation ValueErrors a second time ("数据处理错误: ...")
    and converted the TypeError for non-numeric data into RuntimeError.

    Raises:
        ValueError: missing arguments, missing file, pandas/openpyxl
            read errors (e.g. unknown sheet), unknown column, or empty
            column after dropping NaNs.
        TypeError: the column contains non-numeric values.
        RuntimeError: any other unexpected read failure.
    """
    if not file_path:
        raise ValueError("请提供XLSX文件路径")
    if not sheet_name:
        raise ValueError("请提供工作表名称")
    if not column:
        raise ValueError("请提供要分析的列名")

    try:
        df = pd.read_excel(file_path, sheet_name=sheet_name, engine='openpyxl')
    except FileNotFoundError:
        raise ValueError(f"文件 '{file_path}' 不存在")
    except ValueError as e:
        # e.g. sheet name not found, reported by pandas/openpyxl
        raise ValueError(f"数据处理错误: {str(e)}") from e
    except Exception as e:
        raise RuntimeError(f"读取XLSX文件时发生错误: {str(e)}") from e

    if column not in df.columns:
        raise ValueError(f"列 '{column}' 不存在于工作表 '{sheet_name}' 中")

    data = df[column].dropna().tolist()
    if not data:
        raise ValueError("提取的数据为空，请检查文件内容")
    if not all(isinstance(x, (int, float)) for x in data):
        raise TypeError("数据包含非数值类型，请确保所选列只包含数字")

    variance_stats = calculate_variance(data)
    return {
        "mean": calculate_mean(data),
        "median": calculate_median(data),
        "variance": variance_stats["方差"],
        "std_dev": variance_stats["标准差"],
        "skewness": calculate_skewness(data),
        "kurtosis": calculate_kurtosis(data)
    }

@mcp.tool()
def generate_boxplot(data: List[float]) -> str:
    """Render a box plot of `data`, save it as 'boxplot.png', return the name.

    Fixes two defects: (1) `findobj(match=plt.Rectangle)` never matched
    the patch_artist boxes (they are PathPatches), so the box coloring
    silently did nothing — the dict returned by plt.boxplot() is used
    instead; (2) plt.show() is removed because it can block a stdio MCP
    server under an interactive matplotlib backend.
    """
    if not data:
        raise ValueError("数据不能为空")
    plt.figure(figsize=(8, 6))
    # Render Chinese glyphs (assumes the SimHei font is installed — TODO confirm)
    plt.rcParams["font.family"] = ["SimHei"]
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    # Custom box plot styling
    whiskerprops = dict(color='blue')
    capprops = dict(color='blue')
    medianprops = dict(color='red', linewidth=2)
    flierprops = dict(marker='o', color='red', alpha=0.5)
    # patch_artist=True makes the boxes fillable patches, returned in the dict
    result = plt.boxplot(data, whiskerprops=whiskerprops,
                         capprops=capprops, medianprops=medianprops,
                         flierprops=flierprops, patch_artist=True)
    for box in result['boxes']:
        box.set_facecolor('lightblue')
        box.set_edgecolor('blue')
    plt.title('数据分布箱线图')
    plt.ylabel('数值')
    plt.savefig('boxplot.png', dpi=300, bbox_inches='tight')
    plt.close()
    return 'boxplot.png'

# 2. Probability-distribution tools
@mcp.tool()
def normal_distribution(
    mean: float,
    std_dev: float,
    x: float,
    calc_type: Literal['pdf', 'cdf'] = 'pdf'
) -> float:
    """Evaluate the normal PDF or CDF at `x`.

    Raises:
        ValueError: if `std_dev` is not strictly positive (scipy would
            otherwise silently return NaN).
    """
    if std_dev <= 0:
        raise ValueError("标准差必须大于0")
    if calc_type == 'pdf':
        return float(norm.pdf(x, loc=mean, scale=std_dev))
    return float(norm.cdf(x, loc=mean, scale=std_dev))

@mcp.tool()
def poisson_distribution(
    lambda_: float,
    x: int,
    calc_type: Literal['pdf', 'cdf'] = 'pdf'
) -> float:
    """Evaluate the Poisson PMF ('pdf') or CDF at `x` for rate `lambda_`."""
    evaluate = poisson.pmf if calc_type == 'pdf' else poisson.cdf
    return float(evaluate(x, mu=lambda_))

@mcp.tool()
def binomial_distribution(
    n: int,
    p: float,
    x: int,
    calc_type: Literal['pdf', 'cdf'] = 'pdf'
) -> float:
    """Evaluate the binomial PMF ('pdf') or CDF at `x` for `n` trials, prob `p`."""
    evaluate = binom.pmf if calc_type == 'pdf' else binom.cdf
    return float(evaluate(x, n=n, p=p))

# 3. Hypothesis-testing tools
@mcp.tool()
def one_sample_t_test(sample: List[float], test_value: float) -> str:
    """Two-sided one-sample t-test of `sample` against `test_value`.

    Returns a human-readable summary; failures are reported as a string
    instead of raised, matching the other test tools in this module.
    """
    try:
        t_stat, p_value = stats.ttest_1samp(
            np.array(sample), test_value, alternative='two-sided'
        )
        return f"单样本T检验结果:\nt = {t_stat:.4f}\np = {p_value:.4f} (双尾检验，95%置信水平)"
    except Exception as e:
        return f"计算失败: {str(e)}"

@mcp.tool()
def one_sample_z_test(sample: List[float], population_mean: float, population_std: float) -> str:
    """Two-sided one-sample z-test with a known population std.

    Uses norm.sf instead of `2*(1-cdf)` so extreme z-scores do not
    underflow the p-value to exactly 0. Also reports a clear error for a
    non-positive population std instead of a raw ZeroDivision message.
    """
    try:
        arr = np.array(sample)
        n = len(arr)
        if n < 1:
            return "错误: 样本量不足"
        if population_std <= 0:
            return "错误: 总体标准差必须大于0"
        sample_mean = np.mean(arr)
        z = (sample_mean - population_mean) / (population_std / np.sqrt(n))
        p = 2 * stats.norm.sf(abs(z))  # survival function: accurate upper tail
        return f"单样本Z检验结果:\nz = {z:.4f}\np = {p:.4f} (双尾检验，95%置信水平)"
    except Exception as e:
        return f"计算失败: {str(e)}"

@mcp.tool()
def paired_sample_t_test(sample1: List[float], sample2: List[float]) -> str:
    """Two-sided paired (dependent-samples) t-test.

    Both samples must have the same length; mismatches are reported as
    an error string rather than raised.
    """
    try:
        first = np.array(sample1)
        second = np.array(sample2)
        if len(first) != len(second):
            return "错误: 两个样本长度不一致"
        t_stat, p_value = stats.ttest_rel(first, second, alternative='two-sided')
        return f"配对样本T检验结果:\nt = {t_stat:.4f}\np = {p_value:.4f} (双尾检验)"
    except Exception as e:
        return f"计算失败: {str(e)}"

@mcp.tool()
def two_independent_samples_z_test(sample1: List[float], sample2: List[float], population_std1: float, population_std2: float) -> str:
    """Two-sided two-independent-samples z-test with known population stds.

    Uses norm.sf instead of `2*(1-cdf)` so extreme z-scores do not
    underflow the p-value to exactly 0.
    """
    try:
        arr1 = np.array(sample1)
        arr2 = np.array(sample2)
        n1 = len(arr1)
        n2 = len(arr2)
        if n1 < 1 or n2 < 1:
            return "错误: 两个样本量均需大于0"
        mean1 = np.mean(arr1)
        mean2 = np.mean(arr2)
        # Standard error of the difference of means with known variances
        se = np.sqrt(population_std1 ** 2 / n1 + population_std2 ** 2 / n2)
        z = (mean1 - mean2) / se
        p = 2 * stats.norm.sf(abs(z))  # survival function: accurate upper tail
        return f"两独立样本Z检验结果:\nz = {z:.4f}\np = {p:.4f} (双尾检验，95%置信水平)"
    except Exception as e:
        return f"计算失败: {str(e)}"

@mcp.tool()
def independent_sample_t_test(sample1: List[float], sample2: List[float]) -> str:
    """Two-sided independent two-sample t-test.

    Levene's test decides the variance assumption first: p >= 0.05 keeps
    Student's t (equal variances), otherwise Welch's t is used.
    """
    try:
        group_a = np.array(sample1)
        group_b = np.array(sample2)
        _, p_levene = stats.levene(group_a, group_b)
        equal_var = p_levene >= 0.05
        t_stat, p_value = stats.ttest_ind(
            group_a, group_b, equal_var=equal_var, alternative='two-sided'
        )
        return (f"独立样本T检验结果:\nt = {t_stat:.4f}\np = {p_value:.4f} (双尾检验)\n"
                f"方差齐性检验p值 = {p_levene:.4f} (equal_var={equal_var})")
    except Exception as e:
        return f"计算失败: {str(e)}"

@mcp.tool()
def chi_square_goodness_of_fit(observed: List[float], expected: List[float]) -> Dict:
    """Chi-square goodness-of-fit test of observed vs expected counts.

    Bug fix: the original ran chi2_contingency on the 2xk table
    [observed, expected], which is a test of independence — its statistic
    and p-value did not match the reported k-1 degrees of freedom.
    stats.chisquare is the correct goodness-of-fit test.

    Raises:
        ValueError: if the two lists differ in length (scipy also
            requires the totals of observed and expected to agree).
    """
    if len(observed) != len(expected):
        raise ValueError("观察值和期望值长度必须一致")
    chi2, p = stats.chisquare(f_obs=observed, f_exp=expected)
    dof = len(observed) - 1
    return {
        "卡方值": float(chi2),
        "p值": float(p),
        "自由度": int(dof),
        "显著性": "显著" if p < 0.05 else "不显著"
    }

@mcp.tool()
def chi_square_independence(contingency_table: List[List[float]]) -> Dict:
    """Chi-square test of independence on an r x c contingency table."""
    chi2_stat, p_value, dof, _expected = chi2_contingency(np.array(contingency_table))
    verdict = "显著" if p_value < 0.05 else "不显著"
    return {
        "卡方值": float(chi2_stat),
        "p值": float(p_value),
        "自由度": int(dof),
        "显著性": verdict
    }

@mcp.tool()
def chi_square_homogeneity(groups: List[List[float]]) -> Dict:
    """Chi-square test of homogeneity across the count rows in `groups`.

    Computationally identical to the independence test: both reduce to
    chi2_contingency on the stacked table.
    """
    table = np.array(groups)
    statistic, p_value, dof, _ = chi2_contingency(table)
    return {
        "卡方值": float(statistic),
        "p值": float(p_value),
        "自由度": int(dof),
        "显著性": "显著" if p_value < 0.05 else "不显著"
    }

# NOTE(review): `import math` was appended *after* the __main__ guard
# (with a leftover patch instruction comment), so when run as a script it
# would only execute after mcp.run() returned, i.e. after server shutdown.
# Hoisted above the entry point. It appears unused in this file — confirm
# a caller needs it, otherwise drop it.
import math

if __name__ == "__main__":
    logger.info("Integrated statistics server starting")
    try:
        # Blocks for the lifetime of the server, serving over stdio.
        mcp.run(transport="stdio")
    except Exception:
        # logger.exception records the traceback; re-raise for a nonzero exit.
        logger.exception("Server error")
        raise