
import gc
# 数据加载与检查
from data_loader import load_data, check_label_column
# 特征工程
from feature_engineering import encode_features, standardize, split_data, apply_smote, to_numpy_float32
# 模型定义
from models.logistic_regression import CustomLogisticRegression
from models.mlp import CustomMLP
from models.cnn import CustomCNN

# 训练器
from trainer import train_keras_model

# 评估器
from evaluator import (
    plot_accuracy,
    plot_confusion_matrix,
    plot_roc_curve,
    calculate_overfitting_metrics,
    enhanced_classification_report
)

# 工具函数
from utils import add_channel_dim, compare_models

# Scikit-learn 工具
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score

# Keras 包装器（用于 CNN 网格搜索）
from scikeras.wrappers import KerasClassifier


# Step 1: Load the raw dataset and validate that the label column is usable.
df = load_data()
df = check_label_column(df)

# Step 2: Split into feature matrix X and target vector y.
X = df.drop('HeartDisease', axis=1)
y = df['HeartDisease']

# Step 2.5: Encode the target labels as integers (binary 0/1).
le = LabelEncoder()
y = le.fit_transform(y)

# Steps 3-4: Encode categorical features, then standardize the whole matrix.
X = encode_features(X)
X_scaled = standardize(X)

# Step 5: Hold out a test split for evaluation.
X_train, X_test, y_train, y_test = split_data(X_scaled, y)

# Keep an untouched copy of the training split for the tuning phase later on.
X_train_orig = X_train.copy()
y_train_orig = y_train.copy()

# Step 6: Rebalance the training classes with SMOTE over-sampling.
X_train_res, y_train_res = apply_smote(X_train, y_train)

# Step 7: Cast every array the models will touch to float32 ndarrays.
X_train_res, X_test, y_train_res, y_test = (
    to_numpy_float32(arr) for arr in (X_train_res, X_test, y_train_res, y_test)
)

# Step 8: Train the logistic-regression baseline on the SMOTE-balanced data.
lr = CustomLogisticRegression()
lr.fit(X_train_res, y_train_res)
print(" Logistic Regression 训练完成")

# Step 8.5: Evaluate on the untouched test split.
class_names = ['健康', '心脏病']
lr_preds = lr.predict(X_test)
lr_probs = lr.model.predict_proba(X_test)[:, 1]  # positive-class probability
print("\n逻辑回归评估:")
print(f"准确率: {accuracy_score(y_test, lr_preds):.4f}")
print(f"AUC: {roc_auc_score(y_test, lr_probs):.4f}")
print(classification_report(y_test, lr_preds, target_names=class_names))

# Visual diagnostics: confusion matrix and ROC curve.
plot_confusion_matrix(y_test, lr_preds, classes=class_names, title='逻辑回归混淆矩阵')
plot_roc_curve(y_test, lr_probs, model_name='逻辑回归')

# The raw/scaled feature frames are no longer needed — release them.
del X, y, X_scaled
gc.collect()

# Step 9: Train the MLP on the same resampled training data.
mlp = CustomMLP(
    input_shape=X_train_res.shape[1],
    hidden_units=[64, 32],
    learning_rate=0.001,
    alpha=0.001,
    batch_size=128,
    verbose=False,
)
mlp.fit(X_train_res, y_train_res)
print("MLP 训练完成")

# Step 9.5: Evaluate on the test split.
mlp_preds = mlp.predict(X_test)
mlp_probs = mlp.predict_proba(X_test)
print("\nMLP评估:")
print(f"准确率: {accuracy_score(y_test, mlp_preds):.4f}")
print(f"AUC: {roc_auc_score(y_test, mlp_probs):.4f}")
print(classification_report(y_test, mlp_preds, target_names=['健康', '心脏病']))

# Visual diagnostics: confusion matrix and ROC curve.
plot_confusion_matrix(y_test, mlp_preds, classes=['健康', '心脏病'], title='MLP混淆矩阵')
plot_roc_curve(y_test, mlp_probs, model_name='MLP')

# Free the evaluation arrays before the heavier CNN stage starts.
del mlp_preds, mlp_probs
gc.collect()

# Step 10: Train the CNN. Conv layers need an explicit channel axis, so the
# 2-D feature matrices are expanded before being fed to the model.
X_train_cnn = add_channel_dim(X_train_res)
X_test_cnn = add_channel_dim(X_test)

cnn = CustomCNN(input_shape=X_train_cnn.shape[1:])
cnn.compile()
history_cnn = train_keras_model(cnn, X_train_cnn, y_train_res, X_test_cnn, y_test)

# Step 10.5: Evaluate. Run inference exactly once and derive both the class
# probabilities and the 0/1 predictions from the same output (the previous
# version called cnn.predict() twice, doubling the inference cost).
cnn_probs = cnn.predict(X_test_cnn).flatten()
cnn_preds = (cnn_probs > 0.5).astype(int)
print("\nCNN评估:")
print(f"准确率: {accuracy_score(y_test, cnn_preds):.4f}")
print(f"AUC: {roc_auc_score(y_test, cnn_probs):.4f}")
print(classification_report(y_test, cnn_preds, target_names=['健康', '心脏病']))

# Visual diagnostics: confusion matrix and ROC curve.
plot_confusion_matrix(y_test, cnn_preds, classes=['健康', '心脏病'], title='CNN混淆矩阵')
plot_roc_curve(y_test, cnn_probs, model_name='CNN')

# Step 11: Training/validation accuracy curves for the CNN run.
plot_accuracy(history_cnn.history)

# Step 12: Side-by-side comparison of the three baseline models.
compare_models([lr, mlp, cnn],
               ['Logistic Regression', 'MLP', 'CNN'],
               [X_test, X_test, X_test_cnn],
               y_test)

# Release the baseline models before the tuning phase.
del lr, mlp, cnn, history_cnn
gc.collect()

# Step 13: Parameter tuning for logistic regression.
# Re-apply SMOTE to the untouched training split so tuning starts from the
# same distribution the baseline models saw.
X_train_res_tuning, y_train_res_tuning = apply_smote(X_train_orig, y_train_orig)
X_train_res_tuning = to_numpy_float32(X_train_res_tuning)

# Grid-search the regularization strength C on the underlying sklearn model.
param_grid_lr = {'C': [0.001, 0.01, 0.1, 1, 10, 100]}
grid_search_lr = GridSearchCV(
    CustomLogisticRegression().model,
    param_grid_lr,
    cv=5,
    scoring='accuracy',
)
grid_search_lr.fit(X_train_res_tuning, y_train_res_tuning)
best_params_lr = grid_search_lr.best_params_
print(f"Best parameters for Logistic Regression: {best_params_lr}")

# Wrap the winning estimator back into the custom class and refit.
# NOTE(review): with refit=True (GridSearchCV's default) best_estimator_ is
# already fitted on the full tuning set, so this extra fit looks redundant —
# kept as-is in case CustomLogisticRegression.fit does extra bookkeeping.
best_lr = CustomLogisticRegression()
best_lr.model = grid_search_lr.best_estimator_
best_lr.fit(X_train_res_tuning, y_train_res_tuning)

# Step 14: Parameter tuning for the MLP.
# The grid keys must match CustomMLP's constructor parameters
# (hidden_units / learning_rate / alpha — see the Step 9 instantiation) for
# GridSearchCV's set_params to accept them. The previous keys were sklearn
# MLPClassifier names ('hidden_layer_sizes', 'learning_rate_init'), which
# would raise "Invalid parameter" during the search.
# NOTE(review): assumes CustomMLP follows the sklearn BaseEstimator
# get_params/set_params convention (required for GridSearchCV anyway).
param_grid_mlp = {
    'hidden_units': [[64], [64, 32], [128, 64]],
    'alpha': [0.0001, 0.001, 0.01],
    'learning_rate': [0.001, 0.01, 0.1],
}

base_mlp = CustomMLP(
    input_shape=X_train_res_tuning.shape[1],
    hidden_units=[64, 32],
    learning_rate=0.001,
    alpha=0.001,
    batch_size=128,
    verbose=False,
)

grid_search_mlp = GridSearchCV(
    base_mlp,
    param_grid=param_grid_mlp,
    cv=5,
    scoring='accuracy',
)
grid_search_mlp.fit(X_train_res_tuning, y_train_res_tuning)
best_params_mlp = grid_search_mlp.best_params_
print(f"Best parameters for MLP: {best_params_mlp}")

# best_estimator_ is refit on the full tuning set by GridSearchCV itself.
best_mlp = grid_search_mlp.best_estimator_


# Step 15: CNN "tuning" — the exhaustive GridSearchCV pass is deliberately
# skipped because it is too slow; a fresh CNN is simply retrained on the
# re-sampled tuning data instead.
X_train_cnn_tuning = add_channel_dim(X_train_res_tuning)  # channel axis for conv input
best_cnn = CustomCNN(input_shape=X_train_cnn_tuning.shape[1:])
best_cnn.compile()
history_best_cnn = train_keras_model(
    best_cnn, X_train_cnn_tuning, y_train_res_tuning, X_test_cnn, y_test
)

# Placeholder "best" hyper-parameters, printed for report symmetry only —
# they were never actually searched.
best_params_cnn = {
    'filters': 32,
    'kernel_size': 3,
    'dense_units': 64,
    'learning_rate': 0.001,
    'batch_size': 32,
    'epochs': 10,
}
print(f"Best parameters for CNN (模拟): {best_params_cnn}")

# Step 16: Compare the tuned models side by side.
print("\n===== 调优后模型性能比较 =====")
compare_models(
    [best_lr, best_mlp, best_cnn],
    ['调优逻辑回归', '调优MLP', '调优CNN'],
    [X_test, X_test, X_test_cnn],
    y_test,
)

# Step 17: Over-fitting analysis for the tuned CNN.
print("\n===== 过拟合分析 =====")
print("调优CNN过拟合指标:")
calculate_overfitting_metrics(history_best_cnn.history)

# Step 18: Detailed per-class reports for each tuned model.
print("\n===== 最终模型详细报告 =====")
print("调优逻辑回归报告:")
enhanced_classification_report(y_test, best_lr.predict(X_test))

print("\n调优MLP报告:")
enhanced_classification_report(y_test, best_mlp.predict(X_test))

print("\n调优CNN报告:")
cnn_final_preds = (best_cnn.predict(X_test_cnn).flatten() > 0.5).astype(int)
enhanced_classification_report(y_test, cnn_final_preds)

# Release the remaining large arrays, then the tuned models themselves.
del X_train_res_tuning, y_train_res_tuning, X_train_cnn, X_test_cnn
del best_lr, best_mlp, best_cnn
gc.collect()
