import joblib
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.compose import ColumnTransformer
from sklearn.feature_selection import RFECV
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, f1_score, roc_curve
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, OneHotEncoder, FunctionTransformer
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from utils.common import apply_log_transform, get_log_transform_cols

# Use a CJK-capable font so Chinese titles/labels render in matplotlib figures.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15

# Load the training data and drop columns with no predictive value
# (row identifier and constant columns).
train_data = pd.read_csv('../data/train.csv')
train_data.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1, inplace=True)

# Split into feature matrix X and target vector y.
X = train_data.drop('Attrition', axis=1)
y = train_data['Attrition']

# Report feature shape and class balance.
print(f"特征的形状{X.shape}")
print(f"样本的类别分布:\n{y.value_counts()}")

# Categorical columns are object-dtyped; everything int64/float64 is numeric.
nominal_features = X.select_dtypes(include='object').columns.tolist()
numeric_features = X.select_dtypes(include=['int64', 'float64']).columns.tolist()


# Stratified 80/20 train/test split (fixed seed for reproducibility; stratify
# keeps the minority-class proportion identical in both splits).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# Report shape and class balance for each split.
for split_name, feats, labels in (("训练集", X_train, y_train),
                                  ("测试集", X_test, y_test)):
    print(f"{split_name}的形状")
    print(feats.shape)
    print(labels.shape)
    print(f"{split_name}的类别分布:\n{labels.value_counts()}")


# --- One-hot encode the categorical columns --------------------------------
X_train_nominal = X_train[nominal_features]
X_test_nominal = X_test[nominal_features]

# Fit the encoder on the training split only; categories unseen at predict
# time are encoded as all-zero rows instead of raising.
onehot_encoder = OneHotEncoder(handle_unknown='ignore')
onehot_encoder.fit(X_train_nominal)

# Persist the fitted encoder so the inference pipeline applies the exact
# same category-to-column mapping.
joblib.dump(onehot_encoder, '../encoder/xgb_onehot_encoder.pkl')

# Transform both splits and wrap the dense results as DataFrames so the
# generated column names and original row indices are kept for the merge.
encoded_feature_names = onehot_encoder.get_feature_names_out(nominal_features)
X_train_encoded_df = pd.DataFrame(
    onehot_encoder.transform(X_train_nominal).toarray(),
    columns=encoded_feature_names,
    index=X_train.index,
)
X_test_encoded_df = pd.DataFrame(
    onehot_encoder.transform(X_test_nominal).toarray(),
    columns=encoded_feature_names,
    index=X_test.index,
)

# Replace the raw categorical columns with their encoded counterparts.
X_train = X_train.drop(nominal_features, axis=1)
X_test = X_test.drop(nominal_features, axis=1)
X_train = pd.concat([X_train, X_train_encoded_df], axis=1)
X_test = pd.concat([X_test, X_test_encoded_df], axis=1)

print(f"独热编码后训练集形状：{X_train.shape}")
print(f"独热编码后测试集形状：{X_test.shape}")

# --- Skewness correction (log transform) -----------------------------------
# The columns to transform are selected from the TRAINING split only, so no
# information leaks from the test set; the list is persisted for inference.
skewness_cols = get_log_transform_cols(X_train, numeric_features)
print(f"偏态分布处理的列：{skewness_cols}")
joblib.dump(skewness_cols, '../encoder/xgb_skewness_cols.pkl')
# Apply the same transform to both splits.
X_train = apply_log_transform(X_train, skewness_cols)
X_test = apply_log_transform(X_test, skewness_cols)


# --- Standardization --------------------------------------------------------
# Fit on the training split only; reuse the fitted scaler on the test split.
scaler = StandardScaler()
# FIX: pass index= so the original row index survives the round-trip through
# the numpy array returned by the scaler. The previous code silently reset
# both frames to a RangeIndex, breaking any later index-based alignment with
# y_train / y_test.
X_train = pd.DataFrame(scaler.fit_transform(X_train),
                       columns=X_train.columns, index=X_train.index)
X_test = pd.DataFrame(scaler.transform(X_test),
                      columns=X_test.columns, index=X_test.index)
# Persist the fitted scaler for the inference pipeline.
joblib.dump(scaler, '../encoder/xgb_scaler.pkl')


# --- Oversampling (training split only) -------------------------------------
# ADASYN synthesizes new minority-class samples near existing ones, giving the
# classifier an approximately balanced training distribution. The test split
# is deliberately left untouched.
sampler = ADASYN(random_state=42)
X_train, y_train = sampler.fit_resample(X_train, y_train)

# Report the rebalanced shape and class distribution.
print(f"过采样后的训练集的形状{X_train.shape}")
print(f"过采样后的训练集的类别分布:\n{y_train.value_counts()}")

# --- Hyper-parameter search -------------------------------------------------
# NOTE(review): y_train has already been rebalanced by ADASYN at this point,
# so the negative/positive ratio below is ~1 and scale_pos_weight is close to
# a no-op — confirm whether it should be computed from the original,
# imbalanced labels instead. Also assumes Attrition is already encoded as
# 0/1 integers — TODO confirm (string labels would make both counts zero).
neg_pos_ratio = len(y_train[y_train == 0]) / len(y_train[y_train == 1])

param_grid = {
    'n_estimators': [50, 100, 150, 200, 250, 300],
    'learning_rate': [0.01, 0.1],
    'max_depth': [1, 3, 5, 7],
    'scale_pos_weight': [neg_pos_ratio],
}

# Stratified folds keep the class ratio identical across CV splits.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

grid_search = GridSearchCV(
    estimator=XGBClassifier(random_state=42, eval_metric='logloss'),
    param_grid=param_grid,
    cv=cv,
    scoring='f1_macro',  # macro-F1 weighs both classes equally
    n_jobs=-1,
    verbose=2,
)

# Run the exhaustive search; refit=True (default) retrains the best estimator
# on the full training split afterwards.
grid_search.fit(X_train, y_train)

# Report the winning parameter combination.
print("最佳参数组合:")
for param, value in grid_search.best_params_.items():
    print(f"{param}: {value}")

# --- Evaluate the refit best model on the held-out test split ---------------
best_model = grid_search.best_estimator_
y_pred = best_model.predict(X_test)
y_pred_proba = best_model.predict_proba(X_test)[:, 1]

print("\n=== 最终模型评估 ===")
print(f"AUC: {roc_auc_score(y_test, y_pred_proba):.4f}")
print(f"F1-score: {f1_score(y_test, y_pred):.4f}")
print("\n分类报告:")
print(classification_report(y_test, y_pred))

# --- Feature importance -----------------------------------------------------
# Gain-based importances from the booster, plotted largest-first.
importance_series = pd.Series(best_model.feature_importances_,
                              index=X_train.columns)
importance_series.sort_values(ascending=False).plot(kind='barh',
                                                    figsize=(20, 10))
plt.title('特征重要性')
plt.tight_layout()
plt.savefig('../img/XGBClassifier特征重要性.png', dpi=300, bbox_inches='tight')
plt.show()

# --- Retrain with the best hyper-parameters, tune threshold, and save -------
# NOTE: this refit is technically redundant — GridSearchCV (refit=True)
# already retrained best_estimator_ on the full training split with the same
# params and seed — but it is kept so the saved model is built explicitly
# from best_params_.
xgb = XGBClassifier(**grid_search.best_params_, random_state=42,
                    eval_metric='logloss')
xgb.fit(X_train, y_train)

print("\n=== 训练模型在测试集上的评估 ===")
y_pred_proba = xgb.predict_proba(X_test)[:, 1]

# Choose the smallest probability threshold whose ROC operating point reaches
# recall (TPR) >= 0.9, trading precision for recall on the attrition class.
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
candidate_thresholds = thresholds[tpr >= 0.9]
# FIX: the original indexed candidate_thresholds[0] unconditionally and
# raised IndexError whenever no operating point reached TPR >= 0.9; fall
# back to the conventional 0.5 threshold in that case.
threshold = candidate_thresholds[0] if candidate_thresholds.size else 0.5
print("建议的阈值:", threshold)

# Binarize the probabilities with the tuned threshold and evaluate.
y_pred_custom = (y_pred_proba >= threshold).astype(int)
print(f"AUC: {roc_auc_score(y_test, y_pred_proba):.4f}")

print(f"F1-score: {f1_score(y_test, y_pred_custom):.4f}")
print("\n分类报告:")
print(classification_report(y_test, y_pred_custom))

# Persist the final model for the inference pipeline.
print("开始模型保存...")
joblib.dump(xgb, '../model/xgb_best.pkl')