from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

import numpy as np
import pandas as pd
import json
import os
from typing import List, Optional
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.stats import zscore, probplot
from app.common.utils import some_utility_function, DEST_DIR

import matplotlib.pyplot as plt
import scipy.stats as stats
import statsmodels.formula.api as smf

from statsmodels.stats.outliers_influence import variance_inflation_factor
from scipy.stats import pearsonr, spearmanr, kendalltau, f
from statsmodels.stats.anova import anova_lm
import pingouin as pg

router = APIRouter()


class ParamFile(BaseModel):
    """Request body carrying the path of the data file to analyze."""

    file_path: str = ""  # path to the input file (CSV; some endpoints also accept XLSX)


class ParamPP(BaseModel):
    """Request body for the P-P diagram endpoint."""

    file_path: str = ""  # path to the input file (.csv or .xlsx)


@router.post("/descriptiveStatistic", tags=["examinations"], description="描述性统计")
async def descriptive_statistic(param_data: ParamFile):
    """
    Read a CSV file and return its descriptive statistics as a JSON string.

    Args:
        param_data: request body whose ``file_path`` points at a CSV file.

    Returns:
        str: JSON-encoded descriptive statistics (``DataFrame.describe``),
        or a JSON object with an ``error`` key when the file cannot be read.
    """
    try:
        data = pd.read_csv(param_data.file_path)
    except FileNotFoundError:
        return json.dumps({"error": "File not found."})
    except pd.errors.EmptyDataError:
        return json.dumps({"error": "File is empty."})
    except Exception as e:
        # Catch-all so the endpoint reports the error instead of crashing.
        return json.dumps({"error": f"An error occurred: {str(e)}"})

    # Renamed from ``stats``: that name shadowed the module-level
    # ``import scipy.stats as stats`` and invited confusion.
    describe_dict = data.describe().to_dict()

    # NOTE: deliberately returns a JSON *string* (FastAPI will serialize it
    # as a JSON-encoded string) — kept for backward compatibility with
    # existing callers.
    return json.dumps(describe_dict, indent=4, ensure_ascii=False)


@router.get("/correlationAnalysis", tags=["examinations"], description="相关性分析")
async def correlation_analysis():
    """Placeholder endpoint: returns a fixed correlation payload."""
    response = {"R": 1.0}
    return response


def generate_pp_plot(data, column_name, save_path):
    """
    Build a normal P-P (probability) plot for one column and save it to disk.

    Args:
        data (pandas.DataFrame): input data.
        column_name (str): name of the column to plot.
        save_path (str): destination file path for the figure.
    """
    # Drop missing values before plotting.
    values = data[column_name].dropna()

    # One figure with a single axes; probplot draws onto it.
    fig, axis = plt.subplots(figsize=(8, 6))
    stats.probplot(values, dist="norm", plot=axis)

    plt.title(f"P-P Plot of {column_name}")
    plt.savefig(save_path)
    plt.close()


@router.post(
    "/generatePpDiagram",
    tags=["examinations"],
    description="正太P-P图, 可以用来判断数据是否符合正态分布-数据点均匀分布于直线两侧",
)
async def pp_diagram(param_data: ParamPP):
    """
    Generate a normal P-P plot for every numeric column of the given file.

    Args:
        param_data: request body whose ``file_path`` points at a CSV or
            XLSX file.

    Returns:
        str: JSON list of ``{"imgSrc": ..., "columnName": ...}`` entries,
        or a JSON object with an ``error`` key on failure.
    """
    file_path = param_data.file_path
    if not os.path.exists(file_path):
        return json.dumps({"error": "File not found."})

    # Pick the reader from the file extension.
    if file_path.endswith(".csv"):
        data = pd.read_csv(file_path)
    elif file_path.endswith(".xlsx"):
        data = pd.read_excel(file_path)
    else:
        return json.dumps(
            {"error": "Unsupported file format. Please provide a CSV or XLSX file."}
        )

    # Output folder named after the input file, created next to it.
    output_folder = os.path.join(
        os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0]
    )
    # exist_ok=True avoids the check-then-create race and matches the style
    # already used by the regression endpoint in this module.
    os.makedirs(output_folder, exist_ok=True)

    # One P-P plot per numeric column.
    pp_plot_paths = []
    for column_name in data.select_dtypes(include=["number"]).columns:
        pp_plot_path = os.path.join(output_folder, f"{column_name}_pp_plot.png")
        generate_pp_plot(data, column_name, pp_plot_path)
        pp_plot_paths.append({"imgSrc": pp_plot_path, "columnName": column_name})

    return json.dumps(pp_plot_paths)


@router.get(
    "/residualPlot",
    tags=["examinations"],
    description="残差图(散点图的一种), 回归标准化残差-散点图, 进行 方差齐性判断，查看散点图中的回归标准化残差是否均匀分布于y=0直线两侧。如果均匀分布，提示方差齐性，可以进行线性回归分析，如果数据点呈扇形或漏斗状分布，则提示方差不齐，不满足进行线性回归分析的条件",
)
async def residual_plot():
    """Placeholder endpoint for the residual plot; returns a fixed payload."""
    response = {"R": 1.0}
    return response


""" @router.get("/scatterDiagram", tags=["examinations"], description="散点图,散点均均分布，代表模型拟合良好")
async def scatter_diagram():
    return {"R": 1.0} """


@router.get(
    "/histogram",
    tags=["examinations"],
    description="直方图,回归标准化残差-直方图 如果残差直观上满足正态性，说明模型构建的好",
)
async def histogram():
    """Placeholder endpoint for the residual histogram; returns a fixed payload."""
    response = {"R": 1.0}
    return response


# Request body model for the regression endpoint.
class RegressionRequest(BaseModel):
    """Parameters for /examinations/RegressionAnalysis/."""

    file_path: str  # path to the data file (.csv or .xlsx)
    independent_vars: List[str]  # names of the independent variables
    dependent_var: str  # name of the dependent variable


import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm


def plot_residual_histogram(standardized_residuals_param, hist_plot_path):
    """
    Render a 20-bin histogram of standardized residuals and write it to disk.

    Args:
        standardized_residuals_param: array-like of standardized residuals.
        hist_plot_path (str): output image file path.
    """
    plt.figure()
    plt.hist(standardized_residuals_param, bins=20, alpha=0.7, edgecolor="black")
    plt.title("Histogram of Standardized Residuals")
    plt.xlabel("Standardized Residuals")
    plt.ylabel("Frequency")
    plt.savefig(hist_plot_path)
    plt.close()


def get_anova_results(independent_vars_param, dependent_var_param, df_param):
    """
    Fit an OLS model and return an extended ANOVA table as JSON (orient="split").

    ``anova_lm`` yields one row per regressor plus a trailing "Residual" row;
    this helper prepends a combined "Regression" row (pooled SS/df, overall F
    and p-value) and appends a "Total" row, mirroring the SPSS layout.

    Args:
        independent_vars_param (List[str]): independent variable names.
        dependent_var_param (str): dependent variable name.
        df_param (pandas.DataFrame): data containing all referenced columns.

    Returns:
        str: the extended ANOVA table serialized via ``DataFrame.to_json``.
    """
    # Build the regression formula, e.g. "y ~ x1+x2".
    rhs = "+".join(independent_vars_param)
    formula = f"{dependent_var_param} ~ {rhs}"

    model = smf.ols(formula, data=df_param).fit()

    anova_row_result = anova_lm(model)

    # Regression SS: every row except the trailing Residual row.  Use an
    # explicit positional slice rather than label-ambiguous ``[:-1]``.
    regression_ss = anova_row_result["sum_sq"].iloc[:-1].sum()
    # NOTE(review): assumes one df per variable; a categorical regressor
    # expanded into several terms would need df summed from the table.
    regression_df = len(independent_vars_param)

    residual_ss = anova_row_result.loc["Residual", "sum_sq"]
    residual_df = anova_row_result.loc["Residual", "df"]
    residual_mean_sq = residual_ss / residual_df

    regression_mean_sq = regression_ss / regression_df
    F_value = regression_mean_sq / residual_mean_sq

    # Right-tail p-value of the overall F statistic.
    p_value = 1 - f.cdf(F_value, regression_df, residual_df)

    regression_row = pd.DataFrame(
        {
            "sum_sq": [regression_ss],
            "df": [regression_df],
            "mean_sq": [regression_mean_sq],
            "F": [F_value],
            "PR(>F)": [p_value],
        },
        index=["Regression"],
    )

    # BUG FIX: total SS = regression SS + residual SS.  The previous code
    # summed only ``regression_row["sum_sq"]``, so the "Total" row silently
    # excluded the residual sum of squares.
    total_ss = regression_ss + residual_ss
    total_row = pd.DataFrame(
        {
            "sum_sq": [total_ss],
            "df": [df_param.shape[0] - 1],  # total df = n - 1
            "mean_sq": [None],  # not reported for the Total row
            "F": [None],
            "PR(>F)": [None],
        },
        index=["Total"],
    )

    # Regression row on top, per-term table in the middle, Total at the end.
    anova_row_result = pd.concat([regression_row, anova_row_result, total_row])

    return anova_row_result.to_json(orient="split")


@router.post(
    "/examinations/RegressionAnalysis/",
    tags=["examinations"],
    description="获取回归分析结果",
)
async def perform_regression(request: RegressionRequest):
    """
    Fit an OLS regression and return the summary, ANOVA table, coefficient
    table and diagnostic plot paths (P-P plot, residual scatter, histogram).

    Args:
        request: file path plus independent/dependent variable names.

    Returns:
        dict: summary text, fit statistics, ANOVA JSON string, per-variable
        coefficients, and file paths of the generated plot images.

    Raises:
        HTTPException: 400 for unsupported formats, 404 for a missing file,
        500 for any other failure.
    """
    try:
        # Load the data file (only .xlsx and .csv are supported).
        if request.file_path.endswith(".xlsx"):
            df = pd.read_excel(request.file_path)
        elif request.file_path.endswith(".csv"):
            df = pd.read_csv(request.file_path)
        else:
            # NOTE(review): raised inside the try, so the generic handler
            # below re-raises this 400 as a 500 — consider moving it out.
            raise HTTPException(
                status_code=400, detail="文件格式不支持，仅支持 .xlsx 和 .csv 文件"
            )

        # Extract independent (X) and dependent (y) variables.
        X = df[request.independent_vars]
        y = df[request.dependent_var]

        # Add the intercept term.
        X = sm.add_constant(X)

        # Fit the OLS regression.
        model = sm.OLS(y, X).fit()

        # Extended ANOVA table (JSON string, see get_anova_results).
        anova_results = get_anova_results(
            request.independent_vars, request.dependent_var, df
        )

        # Residuals and fitted values for the diagnostic plots.
        residuals = model.resid
        fitted_values = model.fittedvalues
        # standardized_residuals = zscore(residuals)  # original z-score version

        # SPSS-style standardized residuals.
        # NOTE(review): np.std uses ddof=0 (population SD), not the sample
        # SD the original comment claimed; SPSS divides by sqrt(MSE) —
        # confirm this matches the intended definition.
        residual_std_error = np.std(residuals)
        standardized_residuals = residuals / residual_std_error

        # Output folder named after the input file, created next to it.
        file_dir = os.path.dirname(request.file_path)
        file_name = os.path.splitext(os.path.basename(request.file_path))[0]
        output_dir = os.path.join(file_dir, file_name)
        os.makedirs(output_dir, exist_ok=True)

        # Collected plot file paths.
        plot_paths = []

        # Plot 1: normal P-P plot of the standardized residuals.
        plt.figure()
        probplot(standardized_residuals, dist="norm", plot=plt)
        plt.title("Normal P-P Plot of Standardized Residuals")
        pp_plot_path = os.path.join(output_dir, "normal_pp_plot.png")
        plt.savefig(pp_plot_path)
        plt.close()
        plot_paths.append(pp_plot_path)

        # Plot 2: residuals vs fitted values scatter (homoscedasticity check).
        plt.figure()
        plt.scatter(fitted_values, residuals, alpha=0.7)
        plt.axhline(0, color="red", linestyle="--", linewidth=1)
        plt.xlabel("Fitted Values")
        plt.ylabel("Residuals")
        plt.title("Residuals vs Fitted Values  of Standardized Residuals")
        scatter_plot_path = os.path.join(output_dir, "residuals_scatter_plot.png")
        plt.savefig(scatter_plot_path)
        plt.close()
        plot_paths.append(scatter_plot_path)

        # Plot 3: histogram of the standardized residuals.

        histogram_plot_path = os.path.join(output_dir, "residuals_histogram.png")

        plot_residual_histogram(standardized_residuals, histogram_plot_path)
        plot_paths.append(histogram_plot_path)

        # Assemble the response payload.
        result = {
            "summaryText": model.summary().as_text(),  # full regression report
            "model_summary": {
                "r_squared": model.rsquared,
                "adjusted_r_squared": model.rsquared_adj,
                "f_statistic": model.fvalue,
                "p_value_f_statistic": model.f_pvalue,
            },
            "anova_results": anova_results,
            "coefficients_table": [
                {
                    "variable": var,
                    "coefficient": model.params[var],
                    "p_value": model.pvalues[var],
                    "std_error": model.bse[var],
                }
                for var in model.params.index
            ],
            "plot_picture_paths": plot_paths,  # generated image paths
        }
        return result

    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="文件未找到")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# Request parameter models for /hypothesisTesting.
class VariableInfo(BaseModel):
    """Metadata describing one variable (column) of the uploaded dataset."""

    name: str  # variable (column) name
    valueType: str  # variable type, e.g. "连续" (continuous) / "分类" (categorical)
    variantCType: str  # role: "自变量" (independent) or "因变量" (dependent)
    isNormalDistribution: bool  # whether the variable follows a normal distribution


class AnalysisRequest(BaseModel):
    """Request body for /hypothesisTesting."""

    file_path: str  # path to the data file (.csv or .xlsx)
    variables: List[VariableInfo]  # metadata for every variable to analyze


# 计算VIF的函数
def calculate_vif(data: pd.DataFrame):
    vif_data = pd.DataFrame()
    vif_data["Variable"] = data.columns
    vif_data["VIF"] = [
        variance_inflation_factor(data.values, i) for i in range(data.shape[1])
    ]
    return vif_data


# 计算相关性的函数
def calculate_correlation(
    data_x: pd.DataFrame,
    data_y: Optional[pd.DataFrame],
    variables_info: List[VariableInfo],
):
    correlations = []

    # 定义选择相关分析方法的函数
    def determine_correlation_method(type1, type2):
        if type1 == "连续" and type2 == "连续":
            return pearsonr, "Pearson"
        elif type1 == "分类" or type2 == "分类":
            return kendalltau, "Kendall"
        else:
            return spearmanr, "Spearman"

    def get_variable_type(name):
        for var in variables_info:
            if var.name == name:
                return var.valueType
        return "连续"  # 默认值

    if data_y is not None:
        # 因变量也转换为 DataFrame 格式
        for y_col in data_y.columns:
            for x_col in data_x.columns:
                x_type = get_variable_type(x_col)
                y_type = get_variable_type(y_col)
                corr_func, method_str = determine_correlation_method(x_type, y_type)
                corr, _ = corr_func(data_x[x_col], data_y[y_col])
                correlations.append(
                    {
                        "VariablePair": f"{x_col} vs {y_col}",
                        "Correlation": corr,
                        "Method": method_str,
                    }
                )
    else:
        # 自变量间相关性
        columns = data_x.columns
        for i in range(len(columns)):
            for j in range(i + 1, len(columns)):
                var1, var2 = columns[i], columns[j]
                type1 = get_variable_type(var1)
                type2 = get_variable_type(var2)
                corr_func, method_str = determine_correlation_method(type1, type2)
                corr, _ = corr_func(data_x[var1], data_x[var2])
                correlations.append(
                    {
                        "VariablePair": f"{var1} vs {var2}",
                        "Correlation": corr,
                        "Method": method_str,
                    }
                )

    return correlations


@router.post(
    "/hypothesisTesting", tags=["examinations"], description="相关性和vif值计算"
)
async def analyze_data(request: AnalysisRequest):
    """
    Compute VIF values for the independent variables and pairwise
    correlations between the requested variables of a CSV/XLSX file.

    Args:
        request: file path plus per-variable metadata (name, type, role).

    Returns:
        dict: ``vif`` (list of per-variable records) and ``correlations``
        (list of pairwise correlation records).

    Raises:
        HTTPException: 400 for missing file / unsupported format / unknown
        variable / no independent variables; 500 for computation errors.
    """
    if not os.path.exists(request.file_path):
        raise HTTPException(status_code=400, detail="File not found")

    # BUG FIX: the unsupported-format 400 used to be raised inside the try
    # below, where ``except Exception`` swallowed it and re-raised it as a
    # 500.  Choose the reader before entering the try instead.
    if request.file_path.endswith(".csv"):
        reader = pd.read_csv
    elif request.file_path.endswith(".xlsx"):
        reader = pd.read_excel
    else:
        raise HTTPException(status_code=400, detail="Unsupported file format")

    try:
        data = reader(request.file_path)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error reading file: {str(e)}")

    # Restrict the frame to the requested variables.
    variable_names = [var.name for var in request.variables]
    try:
        selected_data = data[variable_names]
    except KeyError as e:
        raise HTTPException(status_code=400, detail=f"Variable not found: {str(e)}")

    # Split into independent and dependent variables by declared role.
    x_vars = [var.name for var in request.variables if var.variantCType == "自变量"]
    y_vars = [var.name for var in request.variables if var.variantCType == "因变量"]

    if not x_vars:
        raise HTTPException(status_code=400, detail="No independent variables provided")

    x_data = selected_data[x_vars]
    y_data = selected_data[y_vars] if y_vars else None

    # Variance inflation factors for the independent variables.
    try:
        vif_result = calculate_vif(x_data)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error calculating VIF: {str(e)}")

    # Pairwise correlations (method chosen from the variable types).
    try:
        correlation_result = calculate_correlation(x_data, y_data, request.variables)
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Error calculating correlation: {str(e)}"
        )

    return {
        "vif": vif_result.to_dict(orient="records"),
        "correlations": correlation_result,
    }


@router.post(
    "/examinations/MediationTesting/",
    tags=["examinations"],
    description="获取中介效应检验结果",
)
async def mediation_analysis(param_data: ParamFile):
    """
    Run a mediation analysis on columns X (predictor), M (mediator) and
    Y (outcome) of the supplied data file.

    Two methods are reported: pingouin's ``mediation_analysis`` and the
    three classic regressions (X->M, X+M->Y, X->Y) via statsmodels.  Each
    method degrades to an ``{"error": ...}`` payload on failure instead of
    aborting the whole request.

    Raises:
        HTTPException: 400 for unsupported formats, 500 for read errors.
    """
    # BUG FIX (1): choose the reader from the extension up front so the 400
    # below cannot be swallowed by ``except Exception`` and become a 500.
    if param_data.file_path.endswith(".csv"):
        reader = pd.read_csv
    elif param_data.file_path.endswith(".xlsx"):
        # BUG FIX (2): .xlsx files were previously read with pd.read_csv,
        # which cannot parse Excel files.
        reader = pd.read_excel
    else:
        raise HTTPException(status_code=400, detail="Unsupported file format")

    try:
        data = reader(param_data.file_path)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error reading file: {str(e)}")

    # Method 1: pingouin's built-in mediation analysis.
    try:
        pg_results = pg.mediation_analysis(data=data, x="X", m="M", y="Y", alpha=0.05)
        pg_results_dict = pg_results.to_dict(orient="records")  # list of row dicts
    except Exception as e:
        pg_results_dict = {"error": str(e)}

    # Method 2: the three standard mediation regressions via statsmodels.
    try:
        model_x_to_m = smf.ols("M ~ X", data=data).fit()  # X -> M
        model_xm_to_y = smf.ols("Y ~ X + M", data=data).fit()  # X, M -> Y
        model_x_to_y = smf.ols("Y ~ X", data=data).fit()  # X -> Y

        statsmodels_results = {
            "model_x_to_m": {
                "summary": model_x_to_m.summary().as_text(),
                "coefficients": model_x_to_m.params.to_dict(),
            },
            "model_xm_to_y": {
                "summary": model_xm_to_y.summary().as_text(),
                "coefficients": model_xm_to_y.params.to_dict(),
            },
            "model_x_to_y": {
                "summary": model_x_to_y.summary().as_text(),
                "coefficients": model_x_to_y.params.to_dict(),
            },
        }
    except Exception as e:
        statsmodels_results = {"error": str(e)}

    return {
        "pingouin_results": pg_results_dict,
        "statsmodels_results": statsmodels_results,
    }