# train_model.py
import pandas as pd
import numpy as np
import argparse
import os
import joblib # For saving scikit-learn models
import pickle # For saving other objects like encoders if needed
import json # Added for saving feature columns
from datetime import datetime

from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix

import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, LSTM, Dropout, BatchNormalization, Input # Removed Embedding, GlobalAveragePooling1D as not directly used by current simple models
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
# from tensorflow.keras.utils import to_categorical # Not used for binary classification with sigmoid

# 尝试从本地文件导入特征提取脚本的功能
try:
    import feature_extraction
except ImportError:
    print("警告: feature_extraction.py 未在同一目录或PYTHONPATH中找到。请确保该文件存在。")
    def placeholder_preprocess_data(file_path, output_dir="processed_data"):
        print(f"占位符: 正在尝试从 {os.path.join(output_dir, 'preprocessed_data.csv')} 加载预处理数据。")
        processed_file = os.path.join(output_dir, 'preprocessed_data.csv')
        if not os.path.exists(processed_file):
            raise FileNotFoundError(f"错误: 预处理数据 {processed_file} 未找到。请先运行 feature_extraction.py。")
        df = pd.read_csv(processed_file)
        if 'Resistant' not in df.columns:
            raise ValueError("错误: 预处理数据中缺少 'Resistant' 列。")
        return df, None
    feature_extraction = type('module', (object,), {'preprocess_data': placeholder_preprocess_data, 'AMINO_ACIDS': ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'X']})()


# --- 模型定义 ---
def build_svm_model(C=1.0, kernel='rbf', gamma='scale', class_weight=None, random_state=42):
    """Create an SVC classifier with probability estimates enabled."""
    classifier = SVC(
        C=C,
        kernel=kernel,
        gamma=gamma,
        probability=True,  # needed for predict_proba / ROC-AUC downstream
        class_weight=class_weight,
        random_state=random_state,
    )
    return classifier

def build_rf_model(n_estimators=100, max_depth=None, min_samples_split=2, class_weight=None, random_state=42, n_jobs=-1):
    """Create a RandomForestClassifier with the given hyperparameters."""
    forest = RandomForestClassifier(
        n_estimators=n_estimators,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
        random_state=random_state,
        class_weight=class_weight,
        n_jobs=n_jobs,
    )
    return forest

def build_cnn_model(input_shape, 
                    filters1=32, kernel_size1=3, pool_size1=2, dropout1=0.3,
                    filters2=64, kernel_size2=3, pool_size2=2, dropout2=0.3,
                    dense_units=100, dropout_dense=0.5,
                    learning_rate=0.001):
    """Build and compile a two-stage 1D CNN binary classifier.

    The flat feature vector is reshaped to (features, 1) so Conv1D can slide
    along the feature axis; the head is Flatten -> Dense -> sigmoid.
    """
    model = Sequential()
    model.add(tf.keras.layers.Reshape((input_shape[0], 1), input_shape=input_shape))

    # First convolutional stage.
    model.add(Conv1D(filters=filters1, kernel_size=kernel_size1, activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=pool_size1))
    model.add(Dropout(dropout1))

    # Second convolutional stage.
    model.add(Conv1D(filters=filters2, kernel_size=kernel_size2, activation='relu', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling1D(pool_size=pool_size2))
    model.add(Dropout(dropout2))

    # Classification head.
    model.add(Flatten())
    model.add(Dense(dense_units, activation='relu'))
    model.add(Dropout(dropout_dense))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.Precision(name='precision'),
                 tf.keras.metrics.Recall(name='recall'),
                 tf.keras.metrics.AUC(name='roc_auc')],
    )
    return model

def build_lstm_model(input_shape, 
                     lstm_units1=64, dropout_lstm1=0.3,
                     lstm_units2=32, dropout_lstm2=0.3,
                     dense_units=64, dropout_dense=0.5,
                     learning_rate=0.001):
    """Build and compile a stacked two-layer LSTM binary classifier.

    The flat feature vector is reshaped to (features, 1) so each feature is
    consumed as one timestep; the head is Dense -> sigmoid.
    """
    model = Sequential()
    model.add(tf.keras.layers.Reshape((input_shape[0], 1), input_shape=input_shape))
    model.add(LSTM(lstm_units1, return_sequences=True))
    model.add(Dropout(dropout_lstm1))
    model.add(LSTM(lstm_units2))
    model.add(Dropout(dropout_lstm2))
    model.add(Dense(dense_units, activation='relu'))
    model.add(Dropout(dropout_dense))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.Precision(name='precision'),
                 tf.keras.metrics.Recall(name='recall'),
                 tf.keras.metrics.AUC(name='roc_auc')],
    )
    return model

def build_attention_model(input_shape, 
                          lstm_units_att=64, dense_units_att=64,
                          learning_rate=0.001):
    """Build an LSTM classifier with a simple multiplicative attention head (example)."""
    feature_input = Input(shape=input_shape)
    sequence = tf.keras.layers.Reshape((input_shape[0], 1))(feature_input)

    encoded = LSTM(lstm_units_att, return_sequences=True)(sequence)

    # A per-timestep softmax over the LSTM units serves as the attention
    # weights, applied element-wise to the LSTM outputs.
    attention_weights = Dense(lstm_units_att, activation='softmax', name='attention_vec')(encoded)
    attended = tf.keras.layers.multiply([encoded, attention_weights])
    attended = Flatten()(attended)

    hidden = Dense(dense_units_att, activation='relu')(attended)
    hidden = Dropout(0.5)(hidden)  # standard dropout rate
    prediction = Dense(1, activation='sigmoid')(hidden)

    model = Model(inputs=feature_input, outputs=prediction)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.Precision(name='precision'),
                 tf.keras.metrics.Recall(name='recall'),
                 tf.keras.metrics.AUC(name='roc_auc')],
    )
    return model

def build_bert_model_placeholder(input_shape, 
                                 dense_units1_bert=128, dropout1_bert=0.3,
                                 dense_units2_bert=64, dropout2_bert=0.3,
                                 learning_rate=0.001):
    """Simplified stand-in for a BERT-style model: a two-layer MLP classifier."""
    print("注意: 这是一个BERT模型的简化占位符。")
    feature_input = Input(shape=input_shape)
    hidden = Dense(dense_units1_bert, activation='relu')(feature_input)
    hidden = Dropout(dropout1_bert)(hidden)
    hidden = Dense(dense_units2_bert, activation='relu')(hidden)
    hidden = Dropout(dropout2_bert)(hidden)
    prediction = Dense(1, activation='sigmoid')(hidden)

    model = Model(inputs=feature_input, outputs=prediction)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='binary_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.Precision(name='precision'),
                 tf.keras.metrics.Recall(name='recall'),
                 tf.keras.metrics.AUC(name='roc_auc')],
    )
    return model


# --- 训练和评估 ---
def train_and_evaluate_model(X, y, model_type, n_folds, output_dir, args, random_state=42):
    """Run stratified k-fold CV for one model type, then refit it on all data.

    Artifacts written under ``output_dir``:
      - <model_type>_feature_columns.json : column order expected at inference
      - <model_type>_scaler.joblib        : StandardScaler fit on the FULL X
      - <model_type>_oof_predictions.csv  : out-of-fold probabilities + labels
      - <model_type>_threshold_search.csv : F1/precision/recall per threshold
      - <model_type>_cv_results.csv, _avg_cv_metrics.csv : fold and mean metrics
      - final_model_trained_on_all_data/final_<model_type>.[joblib|h5]

    Args:
        X: feature DataFrame; its column names are saved for inference alignment.
        y: binary target Series aligned with X.
        model_type: 'svm', 'rf', 'cnn', 'lstm', 'attention', or 'bert_placeholder'.
        n_folds: number of stratified CV folds.
        output_dir: existing directory for all artifacts.
        args: argparse namespace carrying per-model hyperparameters.
        random_state: seed for CV shuffling and model construction.

    Returns:
        (final_model_path_prefix, scaler_path, avg_output_path)

    NOTE(review): the scaler is fit on the full dataset before CV (see the
    comment below), so validation-fold statistics leak into the training folds.
    """
    # ***** Important addition: persist the feature column names *****
    feature_columns = X.columns.tolist()
    feature_columns_path = os.path.join(output_dir, f"{model_type}_feature_columns.json")
    try:
        with open(feature_columns_path, 'w') as f:
            json.dump(feature_columns, f)
        print(f"Feature columns saved to: {feature_columns_path}")
    except IOError as e:
        print(f"Error saving feature columns: {e}")
    # ***** End of addition *****

    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    cv_results = []
    
    # New: accumulate out-of-fold (OOF) predictions, one slot per sample
    oof_predictions = np.zeros(len(y))
    oof_true_labels = y.copy()
    
    # Callbacks for the TF models (shared across all CV folds)
    early_stopping = EarlyStopping(monitor='val_loss', patience=args.tf_patience, restore_best_weights=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=args.tf_lr_patience, min_lr=1e-6)

    scaler = StandardScaler()
    # Fit scaler on the full X data before CV, then transform train/val splits.
    # For strictness, fit_transform on train and transform on val in each fold.
    # Here, for simplicity with grid search, fit on all X once.
    X_scaled_full_fit = scaler.fit_transform(X) 
    
    scaler_path = os.path.join(output_dir, f"{model_type}_scaler.joblib")
    joblib.dump(scaler, scaler_path)
    print(f"Scaler已保存到: {scaler_path}")

    for fold, (train_idx, val_idx) in enumerate(skf.split(X_scaled_full_fit, y)):
        print(f"\n--- 第 {fold+1}/{n_folds} 折 ---")
        # Use the pre-scaled data and slice (indices are positional)
        X_train, X_val = X_scaled_full_fit[train_idx], X_scaled_full_fit[val_idx]
        y_train, y_val = y.iloc[train_idx], y.iloc[val_idx]

        class_weight_dict = None # Placeholder for class weights if needed
        input_shape = (X_train.shape[1],)

        if model_type == 'svm':
            model = build_svm_model(C=args.svm_C, kernel=args.svm_kernel, gamma=args.svm_gamma, 
                                    class_weight=class_weight_dict, random_state=random_state)
            model.fit(X_train, y_train)
            y_pred_proba = model.predict_proba(X_val)[:, 1]
            y_pred = model.predict(X_val)
        elif model_type == 'rf':
            model = build_rf_model(n_estimators=args.rf_n_estimators, max_depth=args.rf_max_depth, 
                                   min_samples_split=args.rf_min_samples_split, 
                                   class_weight=class_weight_dict, random_state=random_state, n_jobs=args.n_jobs)
            model.fit(X_train, y_train)
            y_pred_proba = model.predict_proba(X_val)[:, 1]
            y_pred = model.predict(X_val)
        elif model_type in ['cnn', 'lstm', 'attention', 'bert_placeholder']:
            if model_type == 'cnn':
                model = build_cnn_model(input_shape, learning_rate=args.cnn_learning_rate,
                                        filters1=args.cnn_filters1, kernel_size1=args.cnn_kernel_size1, pool_size1=args.cnn_pool_size1, dropout1=args.cnn_dropout1,
                                        filters2=args.cnn_filters2, kernel_size2=args.cnn_kernel_size2, pool_size2=args.cnn_pool_size2, dropout2=args.cnn_dropout2,
                                        dense_units=args.cnn_dense_units, dropout_dense=args.cnn_dropout_dense)
            elif model_type == 'lstm':
                model = build_lstm_model(input_shape, learning_rate=args.lstm_learning_rate,
                                         lstm_units1=args.lstm_units1, dropout_lstm1=args.lstm_dropout1,
                                         lstm_units2=args.lstm_units2, dropout_lstm2=args.lstm_dropout2,
                                         dense_units=args.lstm_dense_units, dropout_dense=args.lstm_dropout_dense)
            elif model_type == 'attention':
                 model = build_attention_model(input_shape, learning_rate=args.att_learning_rate,
                                               lstm_units_att=args.att_lstm_units, dense_units_att=args.att_dense_units)
            else: # bert_placeholder
                model = build_bert_model_placeholder(input_shape, learning_rate=args.bert_learning_rate,
                                                     dense_units1_bert=args.bert_dense_units1, dropout1_bert=args.bert_dropout1,
                                                     dense_units2_bert=args.bert_dense_units2, dropout2_bert=args.bert_dropout2)
            
            print(f"训练 {model_type.upper()} 模型 (Epochs: {args.tf_epochs}, Batch: {args.tf_batch_size})...")
            history = model.fit(X_train, y_train.to_numpy(), 
                                validation_data=(X_val, y_val.to_numpy()),
                                epochs=args.tf_epochs, 
                                batch_size=args.tf_batch_size,
                                callbacks=[early_stopping, reduce_lr],
                                class_weight=class_weight_dict,
                                verbose=1 if args.verbose else 0)
            y_pred_proba = model.predict(X_val).ravel()
            y_pred = (y_pred_proba > 0.5).astype(int)
            
            # Optional: Save fold model (can be large for TF)
            # fold_model_path = os.path.join(output_dir, f"{model_type}_fold_{fold+1}.h5")
            # model.save(fold_model_path)
            # print(f"{model_type.upper()} 第 {fold+1} 折模型已保存到: {fold_model_path}")
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")

        # Store this fold's predicted probabilities in the OOF slots
        oof_predictions[val_idx] = y_pred_proba

        acc = accuracy_score(y_val, y_pred)
        precision = precision_score(y_val, y_pred, zero_division=0)
        recall = recall_score(y_val, y_pred, zero_division=0)
        f1 = f1_score(y_val, y_pred, zero_division=0)
        try:
            roc_auc = roc_auc_score(y_val, y_pred_proba)
        except ValueError:
            # roc_auc_score raises when only one class is present in y_val
            roc_auc = np.nan 
            print("警告: ROC AUC无法计算，验证集中只有一个类别。")

        print(f"第 {fold+1} 折评估结果: Acc={acc:.4f}, P={precision:.4f}, R={recall:.4f}, F1={f1:.4f}, AUC={roc_auc:.4f}")
        
        cv_results.append({'Fold': fold + 1, 'Accuracy': acc, 'Precision': precision, 'Recall': recall, 'F1-score': f1, 'ROC-AUC': roc_auc})
        
        if model_type in ['cnn', 'lstm', 'attention', 'bert_placeholder']:
            tf.keras.backend.clear_session()

    # Persist the OOF predictions (pairing with labels is positional)
    oof_df = pd.DataFrame({
        'true_label': oof_true_labels,
        'prediction_prob': oof_predictions
    })
    oof_path = os.path.join(output_dir, f"{model_type}_oof_predictions.csv")
    oof_df.to_csv(oof_path, index=False)
    print(f"\nOOF预测结果已保存到: {oof_path}")
    
    # Search for the best decision threshold (by F1) over the OOF predictions
    thresholds = np.arange(0.1, 1.0, 0.05)
    best_threshold = 0.5  # default threshold
    best_f1 = 0.0
    
    threshold_results = []
    for threshold in thresholds:
        y_pred_threshold = (oof_predictions >= threshold).astype(int)
        f1 = f1_score(oof_true_labels, y_pred_threshold, zero_division=0)
        precision = precision_score(oof_true_labels, y_pred_threshold, zero_division=0)
        recall = recall_score(oof_true_labels, y_pred_threshold, zero_division=0)
        
        threshold_results.append({
            'threshold': threshold,
            'f1_score': f1,
            'precision': precision,
            'recall': recall
        })
        
        if f1 > best_f1:
            best_f1 = f1
            best_threshold = threshold
    
    # Save the threshold-search results
    threshold_df = pd.DataFrame(threshold_results)
    threshold_path = os.path.join(output_dir, f"{model_type}_threshold_search.csv")
    threshold_df.to_csv(threshold_path, index=False)
    print(f"阈值搜索结果已保存到: {threshold_path}")
    print(f"最佳阈值: {best_threshold:.2f}, F1-score: {best_f1:.4f}")

    cv_results_df = pd.DataFrame(cv_results)
    cv_output_path = os.path.join(output_dir, f"{model_type}_cv_results.csv")
    cv_results_df.to_csv(cv_output_path, index=False)
    print(f"\n交叉验证结果已保存到: {cv_output_path}")

    avg_metrics = cv_results_df.drop(columns=['Fold']).mean().to_dict()
    # Add the best threshold and its F1-score to the averaged metrics
    avg_metrics['Best_Threshold'] = best_threshold
    avg_metrics['Best_Threshold_F1'] = best_f1
    
    print("\n交叉验证平均性能:")
    for metric, value in avg_metrics.items(): print(f"  Average {metric}: {value:.4f}")
    
    avg_metrics_df = pd.DataFrame([avg_metrics])
    avg_output_path = os.path.join(output_dir, f"{model_type}_avg_cv_metrics.csv")
    avg_metrics_df.to_csv(avg_output_path, index=False)
    print(f"平均交叉验证性能已保存到: {avg_output_path}")

    print("\n--- 训练最终模型 (使用所有数据) ---")
    final_model_dir = os.path.join(output_dir, "final_model_trained_on_all_data") # More descriptive name
    if not os.path.exists(final_model_dir): os.makedirs(final_model_dir)
    
    final_model_path_prefix = os.path.join(final_model_dir, f"final_{model_type}")
    
    # Use the same scaler fitted on the full dataset earlier
    X_scaled_full = X_scaled_full_fit

    if model_type == 'svm':
        final_model = build_svm_model(C=args.svm_C, kernel=args.svm_kernel, gamma=args.svm_gamma, 
                                      class_weight=class_weight_dict, random_state=random_state)
        final_model.fit(X_scaled_full, y)
        joblib.dump(final_model, f"{final_model_path_prefix}.joblib")
    elif model_type == 'rf':
        final_model = build_rf_model(n_estimators=args.rf_n_estimators, max_depth=args.rf_max_depth, 
                                     min_samples_split=args.rf_min_samples_split, 
                                     class_weight=class_weight_dict, random_state=random_state, n_jobs=args.n_jobs)
        final_model.fit(X_scaled_full, y)
        joblib.dump(final_model, f"{final_model_path_prefix}.joblib")
    elif model_type in ['cnn', 'lstm', 'attention', 'bert_placeholder']:
        if model_type == 'cnn':
            final_model = build_cnn_model(input_shape, learning_rate=args.cnn_learning_rate,
                                        filters1=args.cnn_filters1, kernel_size1=args.cnn_kernel_size1, pool_size1=args.cnn_pool_size1, dropout1=args.cnn_dropout1,
                                        filters2=args.cnn_filters2, kernel_size2=args.cnn_kernel_size2, pool_size2=args.cnn_pool_size2, dropout2=args.cnn_dropout2,
                                        dense_units=args.cnn_dense_units, dropout_dense=args.cnn_dropout_dense)
        elif model_type == 'lstm':
            final_model = build_lstm_model(input_shape, learning_rate=args.lstm_learning_rate,
                                         lstm_units1=args.lstm_units1, dropout_lstm1=args.lstm_dropout1,
                                         lstm_units2=args.lstm_units2, dropout_lstm2=args.lstm_dropout2,
                                         dense_units=args.lstm_dense_units, dropout_dense=args.lstm_dropout_dense)
        elif model_type == 'attention':
            final_model = build_attention_model(input_shape, learning_rate=args.att_learning_rate,
                                               lstm_units_att=args.att_lstm_units, dense_units_att=args.att_dense_units)
        else: # bert_placeholder
            final_model = build_bert_model_placeholder(input_shape, learning_rate=args.bert_learning_rate,
                                                     dense_units1_bert=args.bert_dense_units1, dropout1_bert=args.bert_dropout1,
                                                     dense_units2_bert=args.bert_dense_units2, dropout2_bert=args.bert_dropout2)

        print(f"训练最终 {model_type.upper()} 模型 (Epochs: {args.tf_epochs_final}, Batch: {args.tf_batch_size})...")
        # Final fit has no validation split, so callbacks monitor training loss
        final_early_stopping = EarlyStopping(monitor='loss', patience=args.tf_patience_final, restore_best_weights=True)
        final_reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=args.tf_lr_patience_final, min_lr=1e-6)
        
        final_model.fit(X_scaled_full, y.to_numpy(), epochs=args.tf_epochs_final, batch_size=args.tf_batch_size,
                        callbacks=[final_early_stopping, final_reduce_lr], 
                        class_weight=class_weight_dict, verbose=1 if args.verbose else 0)
        final_model.save(f"{final_model_path_prefix}.h5")
        tf.keras.backend.clear_session()
    
    print(f"最终 {model_type.upper()} 模型已训练并保存到: {final_model_path_prefix}.[joblib/h5]")
    return final_model_path_prefix, scaler_path, avg_output_path


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="肽序列耐受性预测模型训练脚本", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    
    # General arguments
    parser.add_argument("--input_file", type=str, required=True, help="输入的原始CSV数据文件路径")
    parser.add_argument("--model_type", type=str, default="rf", 
                        choices=['svm', 'rf', 'cnn', 'lstm', 'attention', 'bert_placeholder'], 
                        help="要训练的模型类型")
    parser.add_argument("--n_folds", type=int, default=5, help="交叉验证的折数")
    parser.add_argument("--output_dir", type=str, default="training_run_output", 
                        help="存放本次运行所有结果和模型的基础目录 (会被grid_search覆盖)")
    parser.add_argument("--random_state", type=int, default=42, help="随机种子")
    parser.add_argument("--n_jobs", type=int, default=-1, help="用于RF等模型的并行任务数 (-1表示使用所有处理器)")
    parser.add_argument("--verbose", action='store_true', help="是否打印详细TF训练日志")

    # SVM arguments
    svm_group = parser.add_argument_group('SVM Hyperparameters')
    svm_group.add_argument("--svm_C", type=float, default=1.0, help="SVM正则化参数C")
    svm_group.add_argument("--svm_kernel", type=str, default="rbf", choices=['linear', 'poly', 'rbf', 'sigmoid'], help="SVM核函数")
    svm_group.add_argument("--svm_gamma", type=str, default="scale", help="SVM核系数 ('scale', 'auto' or float)")

    # RF arguments
    rf_group = parser.add_argument_group('Random Forest Hyperparameters')
    rf_group.add_argument("--rf_n_estimators", type=int, default=100, help="RF中树的数量")
    rf_group.add_argument("--rf_max_depth", type=int, default=None, help="RF中树的最大深度 (None表示无限制)")
    rf_group.add_argument("--rf_min_samples_split", type=int, default=2, help="RF中分裂内部节点所需的最小样本数")

    # CNN arguments
    cnn_group = parser.add_argument_group('CNN Hyperparameters')
    cnn_group.add_argument("--cnn_learning_rate", type=float, default=0.001, help="CNN学习率")
    cnn_group.add_argument("--cnn_filters1", type=int, default=32, help="CNN第一卷积层滤波器数量")
    cnn_group.add_argument("--cnn_kernel_size1", type=int, default=3, help="CNN第一卷积层核大小")
    cnn_group.add_argument("--cnn_pool_size1", type=int, default=2, help="CNN第一池化层大小")
    cnn_group.add_argument("--cnn_dropout1", type=float, default=0.3, help="CNN第一Dropout率")
    cnn_group.add_argument("--cnn_filters2", type=int, default=64, help="CNN第二卷积层滤波器数量")
    cnn_group.add_argument("--cnn_kernel_size2", type=int, default=3, help="CNN第二卷积层核大小")
    cnn_group.add_argument("--cnn_pool_size2", type=int, default=2, help="CNN第二池化层大小")
    cnn_group.add_argument("--cnn_dropout2", type=float, default=0.3, help="CNN第二Dropout率")
    cnn_group.add_argument("--cnn_dense_units", type=int, default=100, help="CNN全连接层单元数")
    cnn_group.add_argument("--cnn_dropout_dense", type=float, default=0.5, help="CNN全连接层Dropout率")

    # LSTM arguments
    lstm_group = parser.add_argument_group('LSTM Hyperparameters')
    lstm_group.add_argument("--lstm_learning_rate", type=float, default=0.001, help="LSTM学习率")
    lstm_group.add_argument("--lstm_units1", type=int, default=64, help="LSTM第一层单元数")
    lstm_group.add_argument("--lstm_dropout1", type=float, default=0.3, help="LSTM第一层Dropout率")
    lstm_group.add_argument("--lstm_units2", type=int, default=32, help="LSTM第二层单元数")
    lstm_group.add_argument("--lstm_dropout2", type=float, default=0.3, help="LSTM第二层Dropout率")
    lstm_group.add_argument("--lstm_dense_units", type=int, default=64, help="LSTM全连接层单元数")
    lstm_group.add_argument("--lstm_dropout_dense", type=float, default=0.5, help="LSTM全连接层Dropout率")
    
    # Attention Model arguments (simplified, uses LSTM base)
    att_group = parser.add_argument_group('Attention Model Hyperparameters')
    att_group.add_argument("--att_learning_rate", type=float, default=0.001, help="Attention模型学习率")
    att_group.add_argument("--att_lstm_units", type=int, default=64, help="Attention模型中LSTM单元数")
    att_group.add_argument("--att_dense_units", type=int, default=64, help="Attention模型中最终Dense层单元数")

    # BERT Placeholder arguments
    bert_group = parser.add_argument_group('BERT Placeholder Hyperparameters')
    bert_group.add_argument("--bert_learning_rate", type=float, default=0.001, help="BERT占位符学习率")
    bert_group.add_argument("--bert_dense_units1", type=int, default=128, help="BERT占位符第一Dense层单元数")
    bert_group.add_argument("--bert_dropout1", type=float, default=0.3, help="BERT占位符第一Dense层Dropout")
    bert_group.add_argument("--bert_dense_units2", type=int, default=64, help="BERT占位符第二Dense层单元数")
    bert_group.add_argument("--bert_dropout2", type=float, default=0.3, help="BERT占位符第二Dense层Dropout")

    # TensorFlow training arguments (common for CNN, LSTM, Attention, BERT placeholder)
    tf_group = parser.add_argument_group('TensorFlow Training Parameters')
    tf_group.add_argument("--tf_epochs", type=int, default=50, help="TF模型交叉验证训练轮数")
    tf_group.add_argument("--tf_batch_size", type=int, default=32, help="TF模型训练批大小")
    tf_group.add_argument("--tf_patience", type=int, default=10, help="TF EarlyStopping的patience (CV)")
    tf_group.add_argument("--tf_lr_patience", type=int, default=5, help="TF ReduceLROnPlateau的patience (CV)")
    tf_group.add_argument("--tf_epochs_final", type=int, default=100, help="TF最终模型训练轮数")
    tf_group.add_argument("--tf_patience_final", type=int, default=15, help="TF EarlyStopping的patience (最终模型)")
    tf_group.add_argument("--tf_lr_patience_final", type=int, default=7, help="TF ReduceLROnPlateau的patience (最终模型)")


    args = parser.parse_args()
    
    # 如果output_dir不是由grid_search设置的，则创建一个带时间戳的目录
    # 当被grid_search调用时，grid_search会提供一个特定的output_dir
    if args.output_dir == "training_run_output": # Default value, so not set by grid_search
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Ensure the base directory for non-grid-search runs exists
        base_output_dir_for_standalone = "training_results_standalone" 
        if not os.path.exists(base_output_dir_for_standalone):
            os.makedirs(base_output_dir_for_standalone)
        current_run_output_dir = os.path.join(base_output_dir_for_standalone, f"{args.model_type}_{timestamp}")
    else: # output_dir is likely set by grid_search or user
        current_run_output_dir = args.output_dir

    if not os.path.exists(current_run_output_dir):
        os.makedirs(current_run_output_dir)

    print(f"模型类型: {args.model_type.upper()}")
    print(f"交叉验证折数: {args.n_folds}")
    print(f"结果将保存到: {current_run_output_dir}")
    print(f"传入参数: {vars(args)}")


    # 设置随机种子
    np.random.seed(args.random_state)
    tf.random.set_seed(args.random_state)
    # Potentially set os.environ['PYTHONHASHSEED'] = str(args.random_state) for more determinism if needed

    try:
        processed_data_dir = os.path.join(current_run_output_dir, "processed_data_cache") # Cache preprocessed data per run
        
        # Check if preprocessed data for this specific input file already exists for this run
        # This is more relevant if grid_search calls this multiple times with the same input_file
        # For now, preprocess each time grid_search calls, or if run standalone.
        # A more sophisticated caching could be based on input_file hash.
        
        if hasattr(feature_extraction, 'preprocess_data') and callable(feature_extraction.preprocess_data):
            if not hasattr(feature_extraction, 'AMINO_ACIDS') or not feature_extraction.AMINO_ACIDS:
                 print("警告: feature_extraction.AMINO_ACIDS 未找到或为空。将使用默认列表。")
                 feature_extraction.AMINO_ACIDS = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'X']
            print(f"使用的氨基酸列表长度 (来自feature_extraction): {len(feature_extraction.AMINO_ACIDS)}")

            processed_df, aa_encoder = feature_extraction.preprocess_data(args.input_file, output_dir=processed_data_dir)
            print("特征提取完成。")
            if aa_encoder:
                encoder_path = os.path.join(current_run_output_dir, f"{args.model_type}_aa_encoder.pkl")
                with open(encoder_path, 'wb') as f: pickle.dump(aa_encoder, f)
                print(f"氨基酸编码器已保存到: {encoder_path}")
        else:
            raise ImportError("错误: feature_extraction.preprocess_data 函数无法访问。")

        if 'Resistant' not in processed_df.columns: raise ValueError("错误: 'Resistant' 列未在预处理数据中找到。")
        
        X = processed_df.drop(columns=['Resistant'])
        y = processed_df['Resistant']
        X = X.apply(pd.to_numeric, errors='coerce').fillna(0)

        print(f"特征形状: {X.shape}, 目标形状: {y.shape}")
        if X.shape[0] == 0 or X.empty: raise ValueError("错误: 特征提取后没有数据或特征DataFrame为空。")

        # 注意：X 在这里被传递给 train_and_evaluate_model，特征列名将在该函数内部从 X 中提取并保存
        _, _, avg_metrics_path = train_and_evaluate_model(X, y, args.model_type, args.n_folds, current_run_output_dir, args, args.random_state)
        
        print("\n模型训练和评估流程成功完成。")
        # The train_and_evaluate_model function already prints paths.
        # For grid search, the avg_metrics_path is important.
        print(f"平均性能指标CSV文件路径: {avg_metrics_path}")


    except FileNotFoundError as e: print(f"文件错误: {e}")
    except ImportError as e: print(f"导入错误: {e}")
    except ValueError as e: print(f"数据值错误: {e}")
    except Exception as e:
        print(f"训练过程中发生未预料的错误: {e}")
        import traceback
        traceback.print_exc()
