import joblib
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.compose import ColumnTransformer
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, f1_score, roc_curve
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, OneHotEncoder, FunctionTransformer
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from utils.common import apply_log_transform, get_log_transform_cols

# Configure matplotlib for CJK-capable text rendering and a larger base font.
plt.rcParams.update({'font.family': 'SimHei', 'font.size': 15})

# Load the hold-out evaluation set.
test_data = pd.read_csv('../data/test2.csv')
# Drop columns with no predictive value (row ID / constant-valued columns).
test_data.drop(columns=['EmployeeNumber', 'Over18', 'StandardHours'], inplace=True)
# Feature matrix (everything except the target).
X = test_data.drop(columns='Attrition')
# Target variable.
y = test_data['Attrition']
# Report the feature-matrix shape.
print(f"特征的形状{X.shape}")
# Report the class distribution of the target.
print(f"样本的类别分布:\n{y.value_counts()}")

# Split feature names by dtype: categorical (object) vs numeric columns.
nominal_features = X.select_dtypes(include='object').columns.tolist()
numeric_features = X.select_dtypes(include=['int64', 'float64']).columns.tolist()

# Load the preprocessing artifacts fitted at training time: the One-Hot
# encoder, the feature scaler, and the list of columns that were
# log-transformed because they were skewed in the training data.
onehot_encoder = joblib.load('../encoder/xgb_onehot_encoder.pkl')
scaler = joblib.load('../encoder/xgb_scaler.pkl')
skewness_cols = joblib.load('../encoder/xgb_skewness_cols.pkl')

# Split the feature frame into categorical and numeric views for encoding.
data_nominal = X[nominal_features]
data_numeric = X[numeric_features]


# # 加载编码器
# label_encoders = joblib.load('../encoder/xgb_label_encoders.pkl')
#
# # 标签编码
# for col in X.columns:
#     if col in label_encoders:
#         le = label_encoders[col]
#         # 注意：可能遇到训练时未见过的新类别，这里要做异常处理
#         X[col] = X[col].map(lambda x: x if x in le.classes_ else le.classes_[0])
#         # 如果有未知值，可替换为默认类或抛出警告
#         X[col] = le.transform(X[col])


# 使用训练好的 encoder 进行 One-Hot 编码
encoded = onehot_encoder.transform(data_nominal)
# 转为 DataFrame（保留列名方便合并）
encoded_feature_names = onehot_encoder.get_feature_names_out(nominal_features)
X_encoded_df = pd.DataFrame(encoded.toarray(), columns=encoded_feature_names, index=X.index)

# 删除原始类别特征，并合并 One-Hot 编码后的特征
X = X.drop(nominal_features, axis=1)

X = pd.concat([X, X_encoded_df], axis=1)

print(f"独热编码后数据集形状：{X.shape}")

# 应用 log transform（仅对训练时偏态的列）
X = apply_log_transform(X, skewness_cols)

# 标准化
X = pd.DataFrame(scaler.transform(X), columns=X.columns)

# Inference: load the trained XGBoost model and score the prepared features.
xgb = joblib.load("../model/xgb_best.pkl")

# Evaluate the model on the new (hold-out) data.
print("\n=== 模型在新的数据上的评估 ===")
# Probability of the positive class (attrition).
y_pred_proba = xgb.predict_proba(X)[:, 1]

fpr, tpr, thresholds = roc_curve(y, y_pred_proba)

# Pick the first (largest) threshold whose recall (TPR) reaches 0.9.
# NOTE(review): this tunes the decision threshold on the test labels
# themselves, which leaks information into the evaluation — the threshold
# should be chosen on a validation split during training and reused here.
candidate_thresholds = thresholds[tpr >= 0.9]
if candidate_thresholds.size == 0:
    # Unreachable in practice: the ROC curve always ends at TPR == 1.0.
    raise ValueError("no threshold achieves TPR >= 0.9")
threshold = candidate_thresholds[0]
print("建议的阈值:", threshold)
y_pred_custom = (y_pred_proba >= threshold).astype(int)

print(f"AUC: {roc_auc_score(y, y_pred_proba):.4f}")
print(f"F1-score: {f1_score(y, y_pred_custom):.4f}")
print("\n分类报告:")
print(classification_report(y, y_pred_custom))
