import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from xgboost import XGBClassifier
from imblearn.over_sampling import SMOTE
from logUtil import get_logger
import joblib
import itertools  # For confusion matrix plotting

from sklearn.ensemble import HistGradientBoostingClassifier

# --- Configure a CJK-capable font (matplotlib's defaults lack Chinese glyphs) ---
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # render Chinese axis/title labels correctly
    'axes.unicode_minus': False,     # keep the minus sign rendering intact with SimHei
})
# --- /font configuration ---

# Project layout: <repo>/data, <repo>/log, <repo>/model, <repo>/plots,
# resolved relative to this file's parent directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
FIT_DIR = os.path.join(DATA_DIR, 'fit')
LOG_DIR = os.path.join(BASE_DIR, 'log')
MODEL_DIR = os.path.join(BASE_DIR, 'model')
PLOT_DIR = os.path.join(BASE_DIR, 'plots')  # output directory for figures

# Make sure every output directory exists before anything writes to it.
for _dir in (FIT_DIR, LOG_DIR, MODEL_DIR, PLOT_DIR):
    os.makedirs(_dir, exist_ok=True)

# Logging
logger = get_logger('train', log_dir=LOG_DIR)

logger.info(f"Training started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

# Load the training dataset; fail fast with a clear message if it is missing.
data_file_path = os.path.join(DATA_DIR, 'train.csv')
if not os.path.exists(data_file_path):
    logger.error(f"Data file not found: {data_file_path}")
    raise FileNotFoundError(f"Data file not found: {data_file_path}")
data = pd.read_csv(data_file_path)
data = pd.read_csv(data_file_path)

# --- Plot 1: class balance of the target variable ---
fig, ax = plt.subplots(figsize=(6, 4))
sns.countplot(x='Attrition', data=data, ax=ax)
ax.set_title('Distribution of Attrition')
ax.set_xlabel('Attrition (0: No, 1: Yes)')
ax.set_ylabel('Count')
attrition_plot_path = os.path.join(PLOT_DIR, 'attrition_distribution.png')
fig.savefig(attrition_plot_path)
plt.close(fig)  # release the figure to avoid memory build-up
logger.info(f"Saved attrition distribution plot to {attrition_plot_path}")
# --- /Plot 1 ---

# Cleaning: mean-impute missing values in every numeric column.
numeric_columns = data.select_dtypes(include=['int64', 'float64']).columns
cols_with_gaps = [c for c in numeric_columns if data[c].isnull().any()]
for col in cols_with_gaps:
    data[col] = data[col].fillna(data[col].mean())
    logger.info(f"Filled missing values in column '{col}' with mean.")

# Deduplicate: drop exact duplicate rows.
rows_before = len(data)
data = data.drop_duplicates()
dropped = rows_before - len(data)
if dropped:
    logger.info(f"Dropped {dropped} duplicate rows.")

# Features and target: keep only numeric columns; 'Attrition' is the label.
X = data.select_dtypes(include=['int64', 'float64']).drop('Attrition', axis=1, errors='ignore')
y = data['Attrition']

# --- Plot 2: pairwise correlations of the raw numeric features ---
corr_matrix = X.corr()
fig, ax = plt.subplots(figsize=(12, 10))
sns.heatmap(corr_matrix, annot=False, fmt=".2f", cmap='coolwarm', linewidths=0.5, ax=ax)
ax.set_title('Feature Correlation Heatmap (Original Data)')
corr_plot_path = os.path.join(PLOT_DIR, 'feature_correlation_original.png')
fig.savefig(corr_plot_path)
plt.close(fig)
logger.info(f"Saved original feature correlation heatmap to {corr_plot_path}")
# --- /Plot 2 ---

# --- Split BEFORE resampling and feature fitting to avoid data leakage ---
# Applying SMOTE to the full dataset lets synthetic "test" rows be
# interpolated from training rows, and fitting the scaler/polynomial
# expansion on the full dataset leaks test statistics into the model.
# Both inflate the reported test metrics. Correct order: split first,
# resample only the training fold, fit the preprocessor only on it.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42, stratify=y)
logger.info(f"Data split into train ({len(X_train_raw)}) and test ({len(X_test_raw)}) sets.")

# Balance the classes on the training fold only.
smote = SMOTE(random_state=42)
X_train_raw, y_train = smote.fit_resample(X_train_raw, y_train)
logger.info(f"After SMOTE (train only): {len(X_train_raw)} samples.")

# --- Plot 3: class distribution of the resampled training target ---
plt.figure(figsize=(6, 4))
sns.countplot(x=y_train)
plt.title('Distribution of Attrition After SMOTE')
plt.xlabel('Attrition (0: No, 1: Yes)')
plt.ylabel('Count')
smote_attrition_plot_path = os.path.join(PLOT_DIR, 'attrition_distribution_after_smote.png')
plt.savefig(smote_attrition_plot_path)
plt.close()
logger.info(f"Saved SMOTE attrition distribution plot to {smote_attrition_plot_path}")
# --- /Plot 3 ---

# --- Feature engineering: degree-2 polynomial expansion + standardization ---
full_preprocessor = ColumnTransformer(
    transformers=[
        ('num_poly_scaler', Pipeline([
            ('poly', PolynomialFeatures(degree=2, include_bias=False)),
            ('scaler', StandardScaler())
        ]), X.columns)
    ],
    remainder='passthrough'
)

# Fit on the (resampled) training fold; only transform the test fold.
X_train = full_preprocessor.fit_transform(X_train_raw)
X_test = full_preprocessor.transform(X_test_raw)
X_processed = X_train  # kept under this name for downstream feature-count reporting
logger.info(f"Feature engineering completed. Shape: {X_processed.shape}")

# --- Hyper-parameter tuning: Random Forest ---
# Moderately sized grid; max_depth=None means unbounded tree depth.
rf_search_space = {
    'n_estimators': [100, 150],
    'max_depth': [8, 12, None],
    'min_samples_split': [2, 5],
    'min_samples_leaf': [1, 2],
    'max_features': ['sqrt', 'log2'],
}
rf = RandomForestClassifier(random_state=42, n_jobs=-1, class_weight='balanced')
grid_search_rf = GridSearchCV(rf, rf_search_space, cv=5, scoring='accuracy', n_jobs=-1)
grid_search_rf.fit(X_train, y_train)
best_rf = grid_search_rf.best_estimator_
logger.info(f"Best RF params: {grid_search_rf.best_params_}")

# --- Hyper-parameter tuning: Logistic Regression ---
# Fix: dropped n_jobs=-1 from the estimator — scikit-learn ignores n_jobs
# (and emits a warning) when solver='liblinear'; parallelism comes from
# GridSearchCV's own n_jobs instead.
logreg = LogisticRegression(random_state=42, max_iter=1000, solver='liblinear', class_weight='balanced')
param_grid_logreg = {
    'C': [0.01, 0.1, 1.0, 10.0],  # inverse regularization strength, coarse log scale
    'penalty': ['l1', 'l2']       # liblinear supports both penalties
}
grid_search_logreg = GridSearchCV(logreg, param_grid_logreg, cv=5, scoring='accuracy', n_jobs=-1)
grid_search_logreg.fit(X_train, y_train)
best_logreg = grid_search_logreg.best_estimator_
logger.info(f"Best LogReg params: {grid_search_logreg.best_params_}")
# --- Hyper-parameter tuning: XGBoost ---
# Fix: removed use_label_encoder=False — the parameter was deprecated and
# has been removed in xgboost >= 2.0, where passing it triggers a warning.
xgb = XGBClassifier(
    random_state=42,
    eval_metric='logloss',
    n_jobs=-1
)
param_grid_xgb = {
    'n_estimators': [100, 150],       # number of boosting rounds
    'max_depth': [4, 6],              # per-tree depth
    'learning_rate': [0.05, 0.1],     # shrinkage per round
    'subsample': [0.8, 1.0],          # row subsampling
    'colsample_bytree': [0.8, 1.0],   # column subsampling per tree
    'reg_alpha': [0, 0.1],            # L1 regularization
    'reg_lambda': [1, 1.5]            # L2 regularization
}
grid_search_xgb = GridSearchCV(xgb, param_grid_xgb, cv=5, scoring='accuracy', n_jobs=-1)
grid_search_xgb.fit(X_train, y_train)
best_xgb = grid_search_xgb.best_estimator_
logger.info(f"Best XGB params: {grid_search_xgb.best_params_}")

# --- Hyper-parameter tuning: HistGradientBoostingClassifier ---
# A computationally cheap booster added to the ensemble; its hyper-parameters
# are comparatively insensitive, so the grid is kept small.
hgb_search_space = {
    'max_iter': [100, 150],          # boosting iterations (analogous to n_estimators)
    'max_depth': [None, 5, 8],       # None = unbounded depth
    'learning_rate': [0.05, 0.1],
    'l2_regularization': [0.1, 1.0],
}
hgb = HistGradientBoostingClassifier(random_state=42, class_weight='balanced')
grid_search_hgb = GridSearchCV(hgb, hgb_search_space, cv=5, scoring='accuracy', n_jobs=-1)
grid_search_hgb.fit(X_train, y_train)
best_hgb = grid_search_hgb.best_estimator_
logger.info(f"Best HGB params: {grid_search_hgb.best_params_}")

# --- Model ensembling: soft-voting over the four tuned classifiers ---
# Soft voting averages predicted probabilities, which also enables
# predict_proba on the ensemble for the ROC curve below.
tuned_models = [
    ('rf', best_rf),
    ('logreg', best_logreg),
    ('xgb', best_xgb),
    ('hgb', best_hgb),
]
voting_clf = VotingClassifier(estimators=tuned_models, voting='soft', n_jobs=-1)
voting_clf.fit(X_train, y_train)
logger.info("Voting classifier (with HGB) trained successfully.")

# --- Evaluate the ensemble on the held-out test set ---
y_pred = voting_clf.predict(X_test)
y_proba = voting_clf.predict_proba(X_test)[:, 1]  # positive-class probabilities, used for the ROC curve

accuracy = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred)

logger.info(f'Test Accuracy: {accuracy:.4f}')
print(f'Accuracy: {accuracy:.4f}')

logger.info(f'Classification Report:\n{report}')
print('Classification Report:')
print(report)

# --- Plot 4: confusion matrix heatmap ---
cm = confusion_matrix(y_test, y_pred)
class_labels = ['No Attrition', 'Attrition']
fig, ax = plt.subplots(figsize=(6, 5))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=class_labels, yticklabels=class_labels, ax=ax)
ax.set_title('Confusion Matrix')
ax.set_xlabel('Predicted Label')
ax.set_ylabel('True Label')
cm_plot_path = os.path.join(PLOT_DIR, 'confusion_matrix.png')
fig.savefig(cm_plot_path)
plt.close(fig)
logger.info(f"Saved confusion matrix plot to {cm_plot_path}")
# --- /Plot 4 ---


# --- Plot 5: ROC curve of the voting ensemble ---
# Uses the positive-class probabilities (y_proba) computed above.
fpr, tpr, _ = roc_curve(y_test, y_proba)
roc_auc = auc(fpr, tpr)

plt.figure(figsize=(6, 5))
# Idiom fix: f-string instead of dated %-formatting (rendered label is identical).
plt.plot(fpr, tpr, color='darkorange',
         lw=2, label=f'ROC curve (area = {roc_auc:0.2f})')
# Diagonal = performance of a random classifier.
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='Random Guess')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(loc="lower right")
roc_plot_path = os.path.join(PLOT_DIR, 'roc_curve.png')
plt.savefig(roc_plot_path)
plt.close()
logger.info(f"Saved ROC curve plot to {roc_plot_path}")
# --- /Plot 5 ---


# --- Plot 6: feature importances (from the tuned Random Forest) ---
# The polynomial expansion spreads each original feature's importance across
# its derived terms. Improvement: ask the fitted preprocessor for its actual
# output names (e.g. 'num_poly_scaler__Age YearsAtCompany') so the bars are
# interpretable, instead of opaque generic 'feature_i' labels. Falls back to
# generic names on scikit-learn versions without get_feature_names_out.
try:
    feature_names = list(full_preprocessor.get_feature_names_out())
except AttributeError:
    feature_names = [f"feature_{i}" for i in range(X_processed.shape[1])]
importances = best_rf.feature_importances_
indices = np.argsort(importances)[::-1]  # descending by importance

# Show the top-N most important (polynomial) features.
top_n = min(20, len(importances))
plt.figure(figsize=(10, 6))
plt.title(f"Top {top_n} Feature Importances (Random Forest)")
bars = plt.bar(range(top_n), importances[indices[:top_n]], align="center")
plt.xticks(range(top_n), [feature_names[i] for i in indices[:top_n]], rotation=45, ha='right')
plt.xlim([-1, top_n])
plt.tight_layout()  # keep rotated labels from being clipped
feat_imp_plot_path = os.path.join(PLOT_DIR, 'feature_importance_top20.png')
plt.savefig(feat_imp_plot_path)
plt.close()
logger.info(f"Saved top 20 feature importance plot to {feat_imp_plot_path}")
# --- /Plot 6 ---


# Persist the trained Voting Classifier model (currently disabled)
# model_save_path = os.path.join(MODEL_DIR, 'trained_voting_classifier2.pkl')
# joblib.dump(voting_clf, model_save_path)
# logger.info(f"Model saved to {model_save_path}")

# # Persist the preprocessor (currently disabled)
# preprocessor_save_path = os.path.join(MODEL_DIR, 'preprocessor.pkl')
# joblib.dump(full_preprocessor, preprocessor_save_path)  # saves the full_preprocessor defined above
# logger.info(f"Preprocessor saved to {preprocessor_save_path}")
#
# # Persist the original feature names used at training time (currently disabled)
# feature_names_path = os.path.join(MODEL_DIR, 'feature_names.npy')
# np.save(feature_names_path, X.columns.values)  # original feature column names
# logger.info(f"Original feature names saved to {feature_names_path}")
# NOTE(review): the three save steps above are dead (commented-out) code —
# re-enable or delete once the persistence strategy is decided.

logger.info("Training script completed.")
