# evaluate.py
import pandas as pd
import numpy as np
import argparse
import os
import joblib
import pickle
import json
import tensorflow as tf
from sklearn.metrics import (accuracy_score, precision_score, recall_score, 
                             f1_score, roc_auc_score, roc_curve, 
                             precision_recall_curve, confusion_matrix)
import matplotlib.pyplot as plt
import seaborn as sns

# Try to import the feature-extraction script's functionality from a local file.
# We mainly need feature_extraction.AMINO_ACIDS, the structure of
# feature_extraction.one_hot_encode_first3aa, and
# feature_extraction.create_target_variable.
try:
    import feature_extraction
    # Patch in a default 21-letter alphabet (20 amino acids + 'X' for unknown)
    # if the module does not define one or defines it empty.
    if not hasattr(feature_extraction, 'AMINO_ACIDS') or not feature_extraction.AMINO_ACIDS:
        print("补丁: feature_extraction.AMINO_ACIDS 未定义或为空，使用默认值。")
        feature_extraction.AMINO_ACIDS = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'X']
    if not hasattr(feature_extraction, 'one_hot_encode_first3aa'):
        raise ImportError("feature_extraction 模块缺少 one_hot_encode_first3aa 函数。")
    if not hasattr(feature_extraction, 'create_target_variable'):
        raise ImportError("feature_extraction 模块缺少 create_target_variable 函数。")

except ImportError:
    # NOTE(review): the ImportErrors raised above (module present but missing a
    # function) are also caught here, so a partially complete
    # feature_extraction module silently falls back to the mock below.
    print("警告: feature_extraction.py 未在同一目录或PYTHONPATH中找到。评估可能不准确或失败。")
    # Define a stand-in AMINO_ACIDS list and mock functions.
    class MockFeatureExtractionEvaluate:
        """Fallback stand-in that mirrors the interface evaluate.py needs
        from feature_extraction (AMINO_ACIDS, one_hot_encode_first3aa,
        create_target_variable)."""
        AMINO_ACIDS = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'X']
        
        def one_hot_encode_first3aa(self, df, amino_acids_list_param, fitted_encoder=None):
            """One-hot encode the 'First3AA' column of *df* with an already
            fitted encoder.

            Returns a tuple (encoded DataFrame, fitted_encoder). Raises
            ValueError when 'First3AA' is missing, no encoder is supplied,
            or the encoder's transform fails.
            """
            print("警告: 正在使用模拟的 one_hot_encode_first3aa。")
            if 'First3AA' not in df.columns: raise ValueError("Mock one_hot_encode: 'First3AA' column missing.")
            if fitted_encoder is None: raise ValueError("Mock one_hot_encode: fitted_encoder is required.")
            
            # Split the 3-character prefix into one helper column per position;
            # anything not exactly 3 chars long or outside the alphabet maps to 'X'.
            aa_cols_temp = []
            for i in range(3):
                col_name = f'AA{i+1}_eval_mock'
                df[col_name] = df['First3AA'].apply(lambda x: x[i] if len(x) == 3 and x[i] in amino_acids_list_param else 'X')
                aa_cols_temp.append(col_name)
            try:
                encoded_aa_mock = fitted_encoder.transform(df[aa_cols_temp])
                # Column-name generation is simplified here; it should really match
                # feature_extraction.py or use get_feature_names_out.
                num_categories_per_aa = len(amino_acids_list_param)
                expected_encoded_cols = 3 * num_categories_per_aa
                
                if hasattr(fitted_encoder, 'get_feature_names_out'):
                    try:
                        original_feature_names = [f'AA{j+1}' for j in range(3)]
                        encoded_feature_names_mock = fitted_encoder.get_feature_names_out(original_feature_names)
                    except Exception: # Fallback if get_feature_names_out fails with simple AA1, AA2, AA3
                        encoded_feature_names_mock = [f"encoded_mock_{k}" for k in range(encoded_aa_mock.shape[1])]
                elif hasattr(fitted_encoder, 'categories_'):
                     encoded_feature_names_mock = []
                     for i_pos in range(len(fitted_encoder.categories_)): # Should be 3 positions
                         for cat_val in fitted_encoder.categories_[i_pos]:
                             encoded_feature_names_mock.append(f'AA{i_pos+1}_{cat_val}')
                else:
                    encoded_feature_names_mock = [f"encoded_mock_{k}" for k in range(encoded_aa_mock.shape[1])]


                encoded_df_mock = pd.DataFrame(encoded_aa_mock, columns=encoded_feature_names_mock[:encoded_aa_mock.shape[1]], index=df.index)
                return encoded_df_mock, fitted_encoder
            except Exception as e:
                raise ValueError(f"Mock one_hot_encode transform error: {e}.")

        def create_target_variable(self, df):
            """Create the binary 'Resistant' target from 'DPP4_Digested' and
            'No_DPP4'. Mutates *df* in place (coerces to numeric, drops NaN
            rows) and returns it.
            """
            print("警告: 正在使用模拟的 create_target_variable。")
            if not all(col in df.columns for col in ['DPP4_Digested', 'No_DPP4']):
                raise ValueError("Mock create_target: 'DPP4_Digested' or 'No_DPP4' columns missing.")
            df['DPP4_Digested'] = pd.to_numeric(df['DPP4_Digested'], errors='coerce')
            df['No_DPP4'] = pd.to_numeric(df['No_DPP4'], errors='coerce')
            df.dropna(subset=['DPP4_Digested', 'No_DPP4'], inplace=True)
            # NOTE(review): labels a peptide Resistant when the digested signal is
            # >= the undigested one — confirm this matches feature_extraction.py.
            df['Resistant'] = (df['DPP4_Digested'] >= df['No_DPP4']).astype(int)
            return df

    feature_extraction = MockFeatureExtractionEvaluate()


def load_model_for_evaluation(model_path, model_type):
    """Load a trained model from disk (mirrors the loader in predict.py).

    Args:
        model_path: Path to the serialized model (.h5 or .joblib).
        model_type: One of 'svm', 'rf', 'cnn', 'lstm', 'attention',
            'bert_placeholder'; selects the deserialization backend.

    Returns:
        The deserialized scikit-learn or Keras model object.

    Raises:
        FileNotFoundError: If model_path does not exist.
        ValueError: If model_type is not recognized.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"模型文件未找到: {model_path}")

    sklearn_types = {'svm', 'rf'}
    keras_types = {'cnn', 'lstm', 'attention', 'bert_placeholder'}

    if model_type in sklearn_types:
        loaded = joblib.load(model_path)
    elif model_type in keras_types:
        # compile=True restores optimizer/loss state for safety
        loaded = tf.keras.models.load_model(model_path, compile=True)
    else:
        raise ValueError(f"不支持的模型类型: {model_type}")

    print(f"{model_type.upper()} 模型从 {model_path} 加载成功。")
    return loaded

def preprocess_for_evaluation(df_val_original, aa_encoder_path, scaler_path, feature_columns_path):
    """Prepare validation data for evaluation.

    Loads the preprocessors fitted at training time, rebuilds the target
    column and 'First3AA' feature when absent, one-hot encodes the first
    three residues, aligns columns to the training layout, and scales the
    feature matrix.

    Args:
        df_val_original: Raw validation DataFrame (a copy is processed).
        aa_encoder_path: Pickled fitted one-hot encoder for the first 3 residues.
        scaler_path: Joblib-serialized fitted feature scaler.
        feature_columns_path: JSON list of training-time feature column names.

    Returns:
        Tuple of (scaled feature matrix, Series of true 0/1 labels).

    Raises:
        FileNotFoundError: If any preprocessor file is missing.
        ValueError: If required columns cannot be found or derived.
    """
    for required_path, missing_msg in (
        (aa_encoder_path, f"氨基酸编码器文件未找到: {aa_encoder_path}"),
        (scaler_path, f"Scaler文件未找到: {scaler_path}"),
        (feature_columns_path, f"特征列文件未找到: {feature_columns_path}"),
    ):
        if not os.path.exists(required_path):
            raise FileNotFoundError(missing_msg)

    with open(aa_encoder_path, 'rb') as f:
        aa_encoder = pickle.load(f)
    scaler = joblib.load(scaler_path)
    with open(feature_columns_path, 'r') as f:
        expected_feature_columns = json.load(f)
    print("预处理器 (encoder, scaler, feature_columns) 加载成功。")

    eval_df = df_val_original.copy()

    # Rebuild the binary target when the validation CSV lacks it.
    if 'Resistant' not in eval_df.columns:
        print("验证数据中缺少 'Resistant' 列，尝试从 'DPP4_Digested' 和 'No_DPP4' 创建。")
        if any(col not in eval_df.columns for col in ('DPP4_Digested', 'No_DPP4')):
            raise ValueError("验证数据中缺少 'DPP4_Digested' 或 'No_DPP4' 列，无法创建目标变量。")
        eval_df = feature_extraction.create_target_variable(eval_df)

    labels = eval_df['Resistant']

    # Derive 'First3AA' from the peptide sequence when it is not provided;
    # prefixes shorter than 3 characters are right-padded with 'X'.
    if 'First3AA' not in eval_df.columns:
        if 'Peptide_Sequence' not in eval_df.columns:
            raise ValueError("验证数据中必须包含 'First3AA' 列或 'Peptide_Sequence' 列。")
        print("验证数据中缺少 'First3AA' 列，尝试从 'Peptide_Sequence' 的前三个字符创建。")
        prefixes = eval_df['Peptide_Sequence'].astype(str).str[:3]
        eval_df['First3AA'] = prefixes.apply(lambda s: s.ljust(3, 'X') if len(s) < 3 else s)

    # One-hot encode the first three residues with the fitted training encoder.
    encoded_aa_df, _ = feature_extraction.one_hot_encode_first3aa(
        eval_df, feature_extraction.AMINO_ACIDS, fitted_encoder=aa_encoder)
    print(f"First3AA 独热编码完成 (验证集)，生成特征数: {encoded_aa_df.shape[1]}")

    # The encoded residues are the sole feature source, matching training;
    # reindex aligns (and zero-fills) columns to the training layout.
    features = encoded_aa_df.reindex(columns=expected_feature_columns, fill_value=0)
    print(f"特征已对齐到训练时的 {len(expected_feature_columns)} 列 (验证集)。")
    if features.shape[1] != len(expected_feature_columns):
        print(f"警告: 对齐后的特征数量 ({features.shape[1]}) 与预期 ({len(expected_feature_columns)}) 不符。")

    scaled_features = scaler.transform(features)
    print("特征标准化完成 (验证集)。")

    return scaled_features, labels

def plot_roc_curve(y_true, y_pred_proba, output_dir):
    """Plot the ROC curve and save it as 'roc_curve.png' in output_dir.

    Args:
        y_true: Ground-truth binary labels.
        y_pred_proba: Predicted positive-class probabilities/scores.
        output_dir: Directory where the PNG is written.
    """
    auc_score = roc_auc_score(y_true, y_pred_proba)
    fpr, tpr, _ = roc_curve(y_true, y_pred_proba)

    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, color='darkorange', lw=2,
             label=f'ROC curve (AUC = {auc_score:.2f})')
    # Diagonal chance line for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend(loc="lower right")
    plt.grid(True)

    out_png = os.path.join(output_dir, "roc_curve.png")
    plt.savefig(out_png)
    plt.close()
    print(f"ROC曲线已保存到: {out_png}")

def plot_precision_recall_curve(y_true, y_pred_proba, output_dir):
    """Plot the precision-recall curve and save it as
    'precision_recall_curve.png' in output_dir.

    Args:
        y_true: Ground-truth binary labels.
        y_pred_proba: Predicted positive-class probabilities/scores.
        output_dir: Directory where the PNG is written.
    """
    precision, recall, _ = precision_recall_curve(y_true, y_pred_proba)

    plt.figure(figsize=(8, 6))
    plt.plot(recall, precision, color='blue', lw=2, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall Curve')
    # Axis bounds match the ROC plot for visual consistency.
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.legend(loc="lower left")
    plt.grid(True)

    out_png = os.path.join(output_dir, "precision_recall_curve.png")
    plt.savefig(out_png)
    plt.close()
    print(f"精确率-召回率曲线已保存到: {out_png}")

def plot_confusion_matrix(y_true, y_pred_class, output_dir,
                          class_names=('Non-Resistant (0)', 'Resistant (1)')):
    """Plot the confusion matrix heatmap and save it as
    'confusion_matrix.png' in output_dir.

    Args:
        y_true: Ground-truth binary labels.
        y_pred_class: Predicted class labels (0/1).
        output_dir: Directory where the PNG is written.
        class_names: Tick labels for the two classes. The default is an
            immutable tuple (a mutable list default is a Python anti-pattern).
    """
    cm = confusion_matrix(y_true, y_pred_class)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=list(class_names), yticklabels=list(class_names))
    plt.title('Confusion Matrix')
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    save_path = os.path.join(output_dir, "confusion_matrix.png")
    plt.savefig(save_path)
    plt.close()
    print(f"混淆矩阵已保存到: {save_path}")

def perform_evaluation(model, model_type, X_eval_scaled, y_true, output_dir):
    """Evaluate a fitted model on the validation set and save the results.

    Computes accuracy/precision/recall/F1/ROC-AUC, writes them to
    'evaluation_metrics.json' in output_dir, prints a summary, and saves
    the ROC, precision-recall and confusion-matrix plots.

    Args:
        model: Fitted scikit-learn or Keras model.
        model_type: Model type string (currently unused here; kept for
            interface compatibility with callers).
        X_eval_scaled: Scaled feature matrix.
        y_true: Ground-truth binary labels.
        output_dir: Directory for metric and plot outputs.

    Returns:
        Dict mapping metric name to value (ROC-AUC is NaN when y_true has
        a single class).

    Raises:
        TypeError: If the model exposes neither predict_proba nor predict.
    """
    # Decision threshold applied to positive-class scores. NOTE: this is a
    # deliberately non-standard cutoff, not the conventional 0.5 default.
    threshold = 0.45

    if hasattr(model, 'predict_proba'):  # scikit-learn classifiers
        y_pred_proba = model.predict_proba(X_eval_scaled)[:, 1]
    elif hasattr(model, 'predict'):  # TensorFlow/Keras models
        # assumes the network emits one sigmoid score per sample, i.e. shape
        # (n,) or (n, 1) — TODO confirm for multi-output architectures
        y_pred_proba = np.asarray(model.predict(X_eval_scaled)).ravel()
    else:
        raise TypeError("模型没有 'predict' 或 'predict_proba' 方法。")
    y_pred_class = (y_pred_proba > threshold).astype(int)

    # Compute metrics; zero_division=0 avoids warnings on degenerate predictions.
    accuracy = accuracy_score(y_true, y_pred_class)
    precision_val = precision_score(y_true, y_pred_class, zero_division=0)
    recall_val = recall_score(y_true, y_pred_class, zero_division=0)
    f1_val = f1_score(y_true, y_pred_class, zero_division=0)
    try:
        roc_auc_val = roc_auc_score(y_true, y_pred_proba)
    except ValueError:  # y_true contains only one class
        roc_auc_val = np.nan
        print("警告: ROC AUC 无法计算，因为验证集真实标签中只有一个类别。")

    metrics = {
        "Accuracy": accuracy,
        "Precision": precision_val,
        "Recall": recall_val,
        "F1-Score": f1_val,
        "ROC-AUC": roc_auc_val
    }

    print("\n--- 验证集评估结果 ---")
    for metric_name, value in metrics.items():
        print(f"  {metric_name}: {value:.4f}")

    cm = confusion_matrix(y_true, y_pred_class)
    print(f"  Confusion Matrix:\n{cm}")

    # Save metrics to file. json.dump would emit a bare NaN token (invalid
    # strict JSON) for an uncomputable ROC-AUC, so serialize NaN as null.
    serializable_metrics = {
        name: (None if isinstance(val, float) and np.isnan(val) else float(val))
        for name, val in metrics.items()
    }
    metrics_path = os.path.join(output_dir, "evaluation_metrics.json")
    with open(metrics_path, 'w') as f:
        json.dump(serializable_metrics, f, indent=4)
    print(f"评估指标已保存到: {metrics_path}")

    # Draw and save plots; skip the ROC curve when AUC is undefined.
    if not np.isnan(roc_auc_val):
        plot_roc_curve(y_true, y_pred_proba, output_dir)
    plot_precision_recall_curve(y_true, y_pred_proba, output_dir)
    plot_confusion_matrix(y_true, y_pred_class, output_dir)

    return metrics


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="在独立的验证集上评估训练好的模型。")
    parser.add_argument("--model_path", type=str, required=True, help="已训练模型的路径 (.h5 或 .joblib)。")
    parser.add_argument("--model_type", type=str, required=True, 
                        choices=['svm', 'rf', 'cnn', 'lstm', 'attention', 'bert_placeholder'], 
                        help="模型的类型。")
    parser.add_argument("--validation_csv", type=str, required=True, help="包含独立验证数据的CSV文件。")
    parser.add_argument("--model_run_dir", type=str, required=True, 
                        help="包含scaler, encoder和feature_columns的已训练模型的运行目录。")
    parser.add_argument("--output_dir", type=str, required=True, help="保存评估结果 (指标文件和图表) 的目录。")

    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
        print(f"创建评估输出目录: {args.output_dir}")

    try:
        # 从 model_run_dir 构建 scaler, encoder, feature_columns 的路径
        scaler_path = os.path.join(args.model_run_dir, f"{args.model_type}_scaler.joblib")
        aa_encoder_path = os.path.join(args.model_run_dir, f"{args.model_type}_aa_encoder.pkl")
        feature_columns_path = os.path.join(args.model_run_dir, f"{args.model_type}_feature_columns.json")

        print(f"开始评估流程...")
        print(f"  模型路径: {args.model_path}")
        print(f"  模型类型: {args.model_type}")
        print(f"  验证CSV: {args.validation_csv}")
        print(f"  模型运行目录 (用于辅助文件): {args.model_run_dir}")
        print(f"  评估输出目录: {args.output_dir}")

        # 1. 加载验证数据
        if not os.path.exists(args.validation_csv):
            raise FileNotFoundError(f"验证CSV文件未找到: {args.validation_csv}")
        validation_df_original = pd.read_csv(args.validation_csv)
        
        # 2. 预处理验证数据
        X_eval_scaled, y_true_eval = preprocess_for_evaluation(validation_df_original, 
                                                                aa_encoder_path, 
                                                                scaler_path, 
                                                                feature_columns_path)
        print(f"验证数据预处理完成，特征形状: {X_eval_scaled.shape}, 真实标签数量: {len(y_true_eval)}")
        if X_eval_scaled.shape[0] == 0:
            raise ValueError("预处理后没有数据可供评估。")
        if len(y_true_eval) == 0:
            raise ValueError("预处理后没有真实标签可供评估。")
        if X_eval_scaled.shape[0] != len(y_true_eval):
            raise ValueError(f"特征数量 ({X_eval_scaled.shape[0]}) 与标签数量 ({len(y_true_eval)}) 不匹配。")


        # 3. 加载模型
        model_to_evaluate = load_model_for_evaluation(args.model_path, args.model_type)

        # 4. 执行评估
        evaluation_metrics = perform_evaluation(model_to_evaluate, 
                                                args.model_type, 
                                                X_eval_scaled, 
                                                y_true_eval, 
                                                args.output_dir)
        
        print("\n评估成功完成。")

    except FileNotFoundError as e: print(f"文件错误: {e}")
    except ValueError as e: print(f"值错误: {e}")
    except IOError as e: print(f"IO错误: {e}")
    except RuntimeError as e: print(f"运行时错误: {e}")
    except Exception as e:
        print(f"评估过程中发生未预料的错误: {e}")
        import traceback
        traceback.print_exc()
