from fastapi import APIRouter, HTTPException, Body
from typing import Dict, Any, List, Optional, Union
import numpy as np
import pandas as pd
import logging
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVC, SVR
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score, 
                            r2_score, mean_squared_error, mean_absolute_error, confusion_matrix,
                            roc_curve, auc, roc_auc_score)
from pydantic import BaseModel
import traceback

# Module-level router and logger for the ML training endpoints.
router = APIRouter()
logger = logging.getLogger(__name__)

# Configure logging.
# NOTE(review): logging.basicConfig() at import time mutates the root logger as a
# module side effect — confirm this is intended rather than application-level setup.
logging.basicConfig(level=logging.INFO)

class MLModelRequest(BaseModel):
    """Base request payload shared by all model-training endpoints."""
    # Dataset rows, one dict per record (column name -> value).
    data: List[Dict[str, Any]]
    # Names of the columns used as model inputs.
    feature_columns: List[str]
    # Name of the column to predict.
    target_column: str
    # Fraction of rows held out for the test split.
    test_size: float = 0.2
    # Seed for reproducible splitting and training.
    random_state: int = 42
    # Model-specific parameters can be added in subclasses.

class LogisticRegressionRequest(MLModelRequest):
    """Hyperparameters for the logistic regression endpoint."""
    # Inverse regularization strength (smaller = stronger regularization).
    C: float = 1.0
    # Maximum number of solver iterations.
    max_iter: int = 100
    # Regularization penalty passed to sklearn's LogisticRegression.
    penalty: str = 'l2'

class RandomForestRequest(MLModelRequest):
    """Hyperparameters for the random forest endpoint."""
    # Number of trees in the forest.
    n_estimators: int = 100
    # Maximum tree depth; None lets trees grow until leaves are pure.
    max_depth: Optional[int] = None
    # Minimum samples required to split an internal node.
    min_samples_split: int = 2

class SVMRequest(MLModelRequest):
    """Hyperparameters for the SVM endpoint."""
    # Regularization parameter (larger = less regularization).
    C: float = 1.0
    # Kernel type ('rbf', 'linear', 'poly', ...).
    kernel: str = 'rbf'
    # Kernel coefficient ('scale' or 'auto' as accepted by sklearn's SVC).
    gamma: str = 'scale'

class MLModelResponse(BaseModel):
    """Response payload returned by every training endpoint."""
    # Whether training completed successfully.
    success: bool
    # Human-readable status message.
    message: str
    # Evaluation metrics (accuracy, f1, confusion matrix, optional ROC data).
    metrics: Optional[Dict[str, Any]] = None
    # Hyperparameters the model was trained with.
    # NOTE(review): field names starting with "model_" clash with Pydantic v2's
    # protected namespace and emit a warning — confirm the Pydantic major version.
    model_params: Optional[Dict[str, Any]] = None
    # Per-feature importance scores (meaning is model-dependent).
    feature_importance: Optional[Dict[str, float]] = None
    # Miscellaneous training metadata (sample counts, classes, model type).
    training_info: Optional[Dict[str, Any]] = None

def handle_nan_values(data):
    """Build a DataFrame from raw records, mapping NaN to None for JSON safety.

    Raises an HTTP 500 if the records cannot be turned into a DataFrame.
    """
    try:
        frame = pd.DataFrame(data)
        # None (unlike NaN) survives JSON serialization, so swap it in everywhere.
        return frame.replace({np.nan: None})
    except Exception as e:
        logger.error(f"处理NaN值时出错: {str(e)}")
        raise HTTPException(status_code=500, detail=f"处理数据时出错: {str(e)}")

def calculate_metrics(y_true, y_pred, model=None, X_test=None):
    """Compute classification metrics, adding ROC data for binary problems.

    Returns a JSON-serializable dict with accuracy/precision/recall/f1 and the
    confusion matrix. When the task is binary and a probability-capable model
    plus test features are provided, a "roc_curve" entry (fpr/tpr/thresholds/auc)
    is included. On failure a dict with a single "error" key is returned.
    """
    try:
        def _finite(value):
            # Map NaN/inf to 0.0 so every number survives JSON encoding.
            return 0.0 if (np.isnan(value) or np.isinf(value)) else float(value)

        metrics = {
            "accuracy": _finite(accuracy_score(y_true, y_pred)),
            "precision": _finite(precision_score(y_true, y_pred, average='weighted', zero_division=0)),
            "recall": _finite(recall_score(y_true, y_pred, average='weighted', zero_division=0)),
            "f1": _finite(f1_score(y_true, y_pred, average='weighted', zero_division=0)),
            "confusion_matrix": confusion_matrix(y_true, y_pred).tolist()
        }

        n_classes = len(np.unique(y_true))
        # ROC is only defined for binary problems and needs model + features.
        if model is None or X_test is None or n_classes != 2:
            logger.info(f"不计算ROC曲线，原因: 类别数={n_classes}, model存在={model is not None}, X_test存在={X_test is not None}")
            return metrics

        try:
            if hasattr(model, 'predict_proba'):
                # Score with the positive-class probability.
                y_score = model.predict_proba(X_test)[:, 1]
                fpr, tpr, thresholds = roc_curve(y_true, y_score)
                roc_auc = auc(fpr, tpr)

                # Convert numpy arrays to plain lists of finite floats for JSON.
                metrics["roc_curve"] = {
                    "fpr": [_finite(v) for v in fpr],
                    "tpr": [_finite(v) for v in tpr],
                    "thresholds": [_finite(v) for v in thresholds],
                    "auc": _finite(roc_auc)
                }
                logger.info(f"成功计算ROC曲线，AUC值: {roc_auc}")
            else:
                logger.warning("模型不支持概率预测，无法计算ROC曲线")
        except Exception as roc_error:
            # ROC failure is non-fatal: return the base metrics anyway.
            logger.error(f"计算ROC曲线时出错: {str(roc_error)}")
            logger.exception("ROC曲线计算详细错误")

        return metrics
    except Exception as e:
        logger.error(f"计算指标时出错: {str(e)}")
        logger.exception("计算指标详细错误")
        return {"error": str(e)}

def prepare_data(request):
    """Split the request payload into train/test features and targets.

    Validates that every requested column exists (HTTP 400 otherwise),
    label-encodes a non-numeric target, and returns the 4-tuple
    (X_train, X_test, y_train, y_test).
    """
    try:
        df = handle_nan_values(request.data)

        # Fail fast with a client error if any requested column is absent.
        required = request.feature_columns + [request.target_column]
        missing_cols = [col for col in required if col not in df.columns]
        if missing_cols:
            raise HTTPException(status_code=400, detail=f"数据中缺少以下列: {missing_cols}")

        X = df[request.feature_columns]
        y = df[request.target_column]

        if not pd.api.types.is_numeric_dtype(y):
            # Non-numeric target: map each distinct label to an integer index.
            logger.info(f"目标列 '{request.target_column}' 是分类变量，进行标签编码")
            encoding = {label: idx for idx, label in enumerate(y.unique())}
            y = y.map(encoding)

        return train_test_split(
            X,
            y,
            test_size=request.test_size,
            random_state=request.random_state
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"准备数据时出错: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"准备数据时出错: {str(e)}")

def logistic_regression_core(df: pd.DataFrame, feature_columns: list, target_column: str, C: float = 1.0, max_iter: int = 100, penalty: str = 'l2', test_size: float = 0.2, random_state: int = 42):
    """Train and evaluate a logistic regression classifier.

    Args:
        df: Input data containing both feature and target columns.
        feature_columns: Column names used as model inputs.
        target_column: Column to predict; non-numeric targets are label-encoded.
        C: Inverse regularization strength.
        max_iter: Maximum solver iterations.
        penalty: Regularization penalty ('l2' by default; 'l1' is also supported).
        test_size: Fraction of rows held out for evaluation.
        random_state: Seed for the split and the model.

    Returns:
        Tuple of (metrics, feature_importance, training_info).

    Raises:
        ValueError: If any requested column is missing from df.
    """
    # Validate columns up front so the caller gets a clear error message.
    missing_cols = [col for col in feature_columns + [target_column] if col not in df.columns]
    if missing_cols:
        raise ValueError(f"数据中缺少以下列: {missing_cols}")
    X = df[feature_columns]
    y = df[target_column]

    # Label-encode a non-numeric target so sklearn can fit it.
    if not pd.api.types.is_numeric_dtype(y):
        unique_values = y.unique()
        value_to_index = {value: i for i, value in enumerate(unique_values)}
        y = y.map(value_to_index)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )

    # Bug fix: sklearn's default 'lbfgs' solver only supports the 'l2' (or no)
    # penalty, so requests with penalty='l1' used to crash. Select a solver
    # compatible with the requested penalty; behavior for 'l2' is unchanged.
    solver = 'lbfgs' if penalty in ('l2', 'none', None) else 'liblinear'
    model = LogisticRegression(
        C=C,
        max_iter=max_iter,
        penalty=penalty,
        solver=solver,
        random_state=random_state
    )
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)

    # Debug info for the evaluation phase.
    logger.info("逻辑回归模型训练完成，开始评估")
    logger.info(f"目标变量唯一值: {np.unique(y_test)}")
    logger.info(f"是否为二分类问题: {len(np.unique(y_test)) == 2}")

    # Metrics include ROC data when the problem is binary.
    metrics = calculate_metrics(y_test, y_pred, model, X_test)

    # Log a short summary of the ROC data (if any) for debugging.
    if 'roc_curve' in metrics:
        logger.info("成功生成ROC曲线数据")
        logger.info(f"ROC曲线数据摘要: FPR长度={len(metrics['roc_curve']['fpr'])}, TPR长度={len(metrics['roc_curve']['tpr'])}, AUC={metrics['roc_curve']['auc']}")
        logger.info(f"ROC曲线前5个点: {list(zip(metrics['roc_curve']['fpr'][:5], metrics['roc_curve']['tpr'][:5]))}")
    else:
        logger.warning("未生成ROC曲线数据")

    def safe_float(value):
        # Map NaN/inf to 0.0 so the response stays JSON-serializable.
        if np.isnan(value) or np.isinf(value):
            return 0.0
        return float(value)

    # Use mean absolute coefficient magnitude as a feature-importance proxy.
    feature_importance = {}
    for i, feature in enumerate(feature_columns):
        if len(model.coef_.shape) > 1:
            importance = safe_float(np.mean(np.abs(model.coef_[:, i])))
        else:
            importance = safe_float(np.abs(model.coef_[i]))
        feature_importance[feature] = importance

    training_info = {
        "n_features": len(feature_columns),
        "n_samples_train": X_train.shape[0],
        "n_samples_test": X_test.shape[0],
        "classes": list(map(str, model.classes_.tolist())),
        "model_type": "LogisticRegression"
    }
    return metrics, feature_importance, training_info

def random_forest_core(df: pd.DataFrame, feature_columns: list, target_column: str, n_estimators: int = 100, max_depth: int = None, min_samples_split: int = 2, min_samples_leaf: int = 1, criterion: str = 'gini', test_size: float = 0.2, random_state: int = 42):
    """Train and evaluate a random forest classifier.

    Validates the requested columns (ValueError if any are missing),
    label-encodes a non-numeric target, fits a RandomForestClassifier on a
    train split, and returns (metrics, feature_importance, training_info).
    """
    # Fail fast when any requested column is absent.
    missing_cols = [col for col in feature_columns + [target_column] if col not in df.columns]
    if missing_cols:
        raise ValueError(f"数据中缺少以下列: {missing_cols}")

    X = df[feature_columns]
    y = df[target_column]

    # Non-numeric targets are mapped to integer codes before fitting.
    if not pd.api.types.is_numeric_dtype(y):
        codes = {label: idx for idx, label in enumerate(y.unique())}
        y = y.map(codes)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )

    # Fit the forest with the requested hyperparameters.
    model = RandomForestClassifier(
        n_estimators=n_estimators,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
        criterion=criterion,
        random_state=random_state
    )
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)

    # Debug info for the evaluation phase.
    logger.info(f"随机森林模型训练完成，开始评估")
    logger.info(f"目标变量唯一值: {np.unique(y_test)}")
    logger.info(f"是否为二分类问题: {len(np.unique(y_test)) == 2}")

    metrics = calculate_metrics(y_test, y_pred, model, X_test)

    def safe_float(value):
        # Replace NaN/inf with 0.0 to keep the payload JSON-serializable.
        return 0.0 if (np.isnan(value) or np.isinf(value)) else float(value)

    # Impurity-based importances from the fitted forest.
    feature_importance = {
        feature: safe_float(model.feature_importances_[i])
        for i, feature in enumerate(feature_columns)
    }

    training_info = {
        "n_features": len(feature_columns),
        "n_samples_train": X_train.shape[0],
        "n_samples_test": X_test.shape[0],
        "classes": [str(c) for c in model.classes_.tolist()],
        "model_type": "RandomForest",
        "feature_names": feature_columns
    }

    return metrics, feature_importance, training_info

def svm_core(df: pd.DataFrame, feature_columns: list, target_column: str, C: float = 1.0, kernel: str = 'rbf', gamma: str = 'scale', degree: int = 3, test_size: float = 0.2, random_state: int = 42):
    """Train and evaluate an SVM classifier.

    Returns (metrics, feature_importance, training_info). For linear kernels
    feature importance is derived from coefficient magnitudes; for other
    kernels a uniform placeholder value is assigned to every feature.
    """
    # Fail fast when any requested column is absent.
    missing_cols = [col for col in feature_columns + [target_column] if col not in df.columns]
    if missing_cols:
        raise ValueError(f"数据中缺少以下列: {missing_cols}")

    X = df[feature_columns]
    y = df[target_column]

    # Non-numeric targets are mapped to integer codes before fitting.
    if not pd.api.types.is_numeric_dtype(y):
        codes = {label: idx for idx, label in enumerate(y.unique())}
        y = y.map(codes)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )

    # probability=True enables predict_proba, which the ROC computation needs.
    # degree only matters for the polynomial kernel; otherwise use sklearn's default.
    model = SVC(
        C=C,
        kernel=kernel,
        gamma=gamma,
        degree=degree if kernel == 'poly' else 3,
        probability=True,
        random_state=random_state
    )
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)

    # Debug info for the evaluation phase.
    logger.info(f"SVM模型训练完成，开始评估")
    logger.info(f"目标变量唯一值: {np.unique(y_test)}")
    logger.info(f"是否为二分类问题: {len(np.unique(y_test)) == 2}")

    metrics = calculate_metrics(y_test, y_pred, model, X_test)

    def safe_float(value):
        # Replace NaN/inf with 0.0 to keep the payload JSON-serializable.
        return 0.0 if (np.isnan(value) or np.isinf(value)) else float(value)

    feature_importance = {}
    if hasattr(model, 'coef_'):
        # Linear kernel: use mean absolute coefficient magnitude per feature.
        coef = model.coef_
        for i, feature in enumerate(feature_columns):
            if len(coef.shape) > 1:
                feature_importance[feature] = safe_float(np.mean(np.abs(coef[:, i])))
            else:
                feature_importance[feature] = safe_float(np.abs(coef[i]))
    else:
        # Non-linear kernel exposes no coefficients; use a uniform placeholder.
        uniform = 1.0 / len(feature_columns)
        feature_importance = {feature: uniform for feature in feature_columns}
        logger.info("SVM模型使用非线性核，特征重要性设置为均匀值")

    training_info = {
        "n_features": len(feature_columns),
        "n_samples_train": X_train.shape[0],
        "n_samples_test": X_test.shape[0],
        "classes": [str(c) for c in model.classes_.tolist()],
        "model_type": "SVM",
        "kernel": kernel,
        "feature_names": feature_columns
    }

    return metrics, feature_importance, training_info

@router.post("/logistic_regression")
async def train_logistic_regression(request: LogisticRegressionRequest):
    """Train a logistic regression model and return metrics and importances.

    Delegates training to logistic_regression_core and wraps the result in an
    MLModelResponse. Unexpected failures surface as HTTP 500; HTTPExceptions
    raised by lower layers keep their original status and detail.
    """
    try:
        logger.info("开始训练逻辑回归模型...")
        df = handle_nan_values(request.data)
        metrics, feature_importance, training_info = logistic_regression_core(
            df,
            request.feature_columns,
            request.target_column,
            C=request.C,
            max_iter=request.max_iter,
            penalty=request.penalty,
            test_size=request.test_size,
            random_state=request.random_state
        )
        logger.info("逻辑回归模型训练完成")
        return MLModelResponse(
            success=True,
            message="逻辑回归模型训练成功",
            metrics=metrics,
            model_params={
                "C": request.C,
                "max_iter": request.max_iter,
                "penalty": request.penalty
            },
            feature_importance=feature_importance,
            training_info=training_info
        )
    except HTTPException:
        # Bug fix: don't re-wrap HTTPExceptions (e.g. from handle_nan_values)
        # into a generic 500 with a different message — preserve status/detail.
        raise
    except Exception as e:
        logger.error(f"训练逻辑回归模型时出错: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"训练逻辑回归模型时出错: {str(e)}")

@router.post("/random_forest")
async def train_random_forest(request: RandomForestRequest):
    """Train a random forest classifier and return metrics and importances.

    Uses prepare_data for validation/splitting, fits a RandomForestClassifier,
    and wraps the evaluation results in an MLModelResponse. HTTPExceptions
    raised by lower layers keep their original status and detail.
    """
    try:
        logger.info("开始训练随机森林模型...")
        X_train, X_test, y_train, y_test = prepare_data(request)

        # Fit the classifier with the requested hyperparameters.
        model = RandomForestClassifier(
            n_estimators=request.n_estimators,
            max_depth=request.max_depth,
            min_samples_split=request.min_samples_split,
            random_state=request.random_state
        )
        model.fit(X_train, y_train)

        # Evaluate on the held-out split (includes ROC data for binary tasks).
        y_pred = model.predict(X_test)
        metrics = calculate_metrics(y_test, y_pred, model, X_test)

        def safe_float(value):
            # Keep every number finite so the payload is JSON-serializable.
            if np.isnan(value) or np.isinf(value):
                return 0.0
            return float(value)

        # Impurity-based importances from the fitted forest.
        feature_importance = {}
        for i, feature in enumerate(request.feature_columns):
            feature_importance[feature] = safe_float(model.feature_importances_[i])

        model_params = {
            "n_estimators": request.n_estimators,
            "max_depth": request.max_depth,
            "min_samples_split": request.min_samples_split,
            "classes": model.classes_.tolist()
        }

        training_info = {
            "n_features": len(request.feature_columns),
            "n_samples_train": X_train.shape[0],
            "n_samples_test": X_test.shape[0],
            "model_type": "RandomForest"
        }

        logger.info("随机森林模型训练完成")
        return MLModelResponse(
            success=True,
            message="随机森林模型训练成功",
            metrics=metrics,
            model_params=model_params,
            feature_importance=feature_importance,
            training_info=training_info
        )
    except HTTPException:
        # Bug fix: prepare_data raises HTTP 400 for missing columns; the broad
        # except below used to convert it into a 500. Preserve status/detail.
        raise
    except Exception as e:
        logger.error(f"训练随机森林模型时出错: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"训练随机森林模型时出错: {str(e)}")

@router.post("/svm")
async def train_svm(request: SVMRequest):
    """Train an SVM classifier and return metrics and importances.

    Uses prepare_data for validation/splitting, fits an SVC (with probability
    estimates enabled so ROC data can be computed), and wraps the results in
    an MLModelResponse. HTTPExceptions raised by lower layers keep their
    original status and detail.
    """
    try:
        logger.info("开始训练SVM模型...")
        X_train, X_test, y_train, y_test = prepare_data(request)

        # probability=True enables predict_proba for ROC curve computation.
        model = SVC(
            C=request.C,
            kernel=request.kernel,
            gamma=request.gamma,
            probability=True,
            random_state=request.random_state
        )
        model.fit(X_train, y_train)

        # Evaluate on the held-out split (includes ROC data for binary tasks).
        y_pred = model.predict(X_test)
        metrics = calculate_metrics(y_test, y_pred, model, X_test)

        def safe_float(value):
            # Keep every number finite so the payload is JSON-serializable.
            if np.isnan(value) or np.isinf(value):
                return 0.0
            return float(value)

        # SVMs only expose coefficients for linear kernels; otherwise report 0.0.
        feature_importance = {}
        if hasattr(model, 'coef_'):
            for i, feature in enumerate(request.feature_columns):
                if len(model.coef_.shape) > 1:
                    importance = safe_float(np.mean(np.abs(model.coef_[:, i])))
                else:
                    importance = safe_float(np.abs(model.coef_[i]))
                feature_importance[feature] = importance
        else:
            for feature in request.feature_columns:
                feature_importance[feature] = 0.0
            logger.info("SVM模型不提供特征重要性，设置为默认值")

        model_params = {
            "C": request.C,
            "kernel": request.kernel,
            "gamma": request.gamma
        }

        training_info = {
            "n_features": len(request.feature_columns),
            "n_samples_train": X_train.shape[0],
            "n_samples_test": X_test.shape[0],
            "model_type": "SVM"
        }

        logger.info("SVM模型训练完成")
        return MLModelResponse(
            success=True,
            message="SVM模型训练成功",
            metrics=metrics,
            model_params=model_params,
            feature_importance=feature_importance,
            training_info=training_info
        )
    except HTTPException:
        # Bug fix: prepare_data raises HTTP 400 for missing columns; the broad
        # except below used to convert it into a 500. Preserve status/detail.
        raise
    except Exception as e:
        logger.error(f"训练SVM模型时出错: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=f"训练SVM模型时出错: {str(e)}")