# import pandas as pd
# import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import StandardScaler
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
# from sklearn.metrics import accuracy_score, roc_auc_score
#
# # 1. 数据加载
#
# train_data = pd.read_csv('../data/train.csv')
# test_data = pd.read_csv('../data/test.csv')
#
# # 删除明显无用的列
# train_data = train_data.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1)
# test_data = test_data.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1)
#
# # 2. 数据清洗和准备
# # 分离特征和目标
# x_train = train_data.drop('Attrition', axis=1)
# x_test = test_data.drop('Attrition', axis=1)
# y_train = train_data['Attrition']
# y_test = test_data['Attrition']
#
# # 简单的类别变量处理：直接用pandas的get_dummies
# x_train = pd.get_dummies(x_train)
# x_test = pd.get_dummies(x_test)
#
# # 确保训练集和测试集列一致
# x_test = x_test.reindex(columns=x_train.columns, fill_value=0)
#
# # 3. 标准化
# scaler = StandardScaler()
# x_train_scaled = scaler.fit_transform(x_train)
# x_test_scaled = scaler.transform(x_test)
#
# # 4. 模型训练和预测
# # 使用随机森林（简单有效，不需要太多调参）
# model = RandomForestClassifier(n_estimators=100, random_state=42)
# model.fit(x_train, y_train)
# # 预测
# y_predict = model.predict(x_test)
# model.score(x_test, y_test)
# # 测试结果
# print(classification_report(y_test, y_predict))
# print(f'准确率: {model.score(x_test, y_test)}')
# print(f'AUC值: {roc_auc_score(y_test, y_predict)}')
# print('***'*20)
#
#
#
# # 超参数选择代码
# rf = RandomForestClassifier()
# param = {"n_estimators": [80, 100, 200], "max_depth": [2, 4, 6, 8, 10, 12], "random_state": [9]}
# from sklearn.model_selection import GridSearchCV
#
# gc = GridSearchCV(rf, param_grid=param, cv=2)
# gc.fit(x_train, y_train)
# print("随机森林预测的准确率为：", gc.score(x_test, y_test))

import os
import warnings

warnings.filterwarnings('ignore')

import joblib
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (accuracy_score, classification_report,
                             roc_auc_score, roc_curve)
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler

from utils.data_load import dataload

# 1. Data loading
X_train, X_test, y_train, y_test = dataload()


def _add_engineered_features(df):
    """Add ratio, income, tenure-frequency and satisfaction features in place.

    Mutates *df* (a pandas DataFrame with the raw HR columns — assumed to
    contain Age, YearsAtCompany, TotalWorkingYears, etc.; confirmed by the
    column names used below) and returns it. The ``1e-5`` / ``+ 1`` offsets
    guard the denominators against division by zero.
    """
    # --- Ratio features (computed on the raw, pre-log columns) ---
    df['YearsAtCompany_Age_ratio'] = df['YearsAtCompany'] / (df['Age'] + 1e-5)
    df['TotalWorkingYears_Age_ratio'] = df['TotalWorkingYears'] / (df['Age'] + 1e-5)
    df['CurrentRole_Company_ratio'] = df['YearsInCurrentRole'] / (df['YearsAtCompany'] + 1e-5)

    # --- Income-related ---
    df['Income_per_Year'] = df['MonthlyIncome'] / (df['TotalWorkingYears'] + 1)
    df['Income_per_Level'] = df['MonthlyIncome'] / (df['JobLevel'] + 1e-5)

    # --- Tenure / change-frequency ---
    df['Promotion_Frequency'] = df['YearsSinceLastPromotion'] / (df['YearsAtCompany'] + 1)
    df['RoleChange_Frequency'] = df['YearsInCurrentRole'] / (df['TotalWorkingYears'] + 1)

    # --- Satisfaction aggregates ---
    satisfaction_cols = ['EnvironmentSatisfaction', 'JobSatisfaction', 'RelationshipSatisfaction']
    df['Satisfaction_Mean'] = df[satisfaction_cols].mean(axis=1)
    df['Satisfaction_Std'] = df[satisfaction_cols].std(axis=1)

    # --- Log transform to reduce skew (runs AFTER the ratio features above,
    # which intentionally use the raw values) ---
    for col in ('MonthlyIncome', 'DistanceFromHome', 'NumCompaniesWorked'):
        if col in df.columns:
            df[col] = np.log1p(df[col])
    return df


# Feature engineering: identical transformations on both splits.
X_train = _add_engineered_features(X_train)
X_test = _add_engineered_features(X_test)

# Identify the non-numeric columns (categorical features) and one-hot encode them.
categorical_features = X_train.select_dtypes(include=['object']).columns
X_train_encoded = pd.get_dummies(X_train[categorical_features], dtype=int)
X_test_encoded = pd.get_dummies(X_test[categorical_features], dtype=int)
# BUG FIX: align the test dummy columns with the training ones. get_dummies is
# applied to each split independently, so a category missing from (or extra
# in) the test split yields a different column set and breaks the downstream
# StandardScaler.transform. Missing columns are filled with 0; extra ones are
# dropped.
X_test_encoded = X_test_encoded.reindex(columns=X_train_encoded.columns, fill_value=0)

# Re-attach the numeric columns alongside the encoded categorical columns.
numerical_features = X_train.select_dtypes(exclude=['object']).columns
X_train = pd.concat([X_train[numerical_features], X_train_encoded], axis=1)
X_test = pd.concat([X_test[numerical_features], X_test_encoded], axis=1)



# X_train, X_test, y_train, y_test = dataload()
#
# # 2. 类别特征 One-Hot；数值特征保留
# cat_cols = X_train.select_dtypes(include='object').columns
# num_cols = X_train.select_dtypes(exclude='object').columns
#
# X_train = pd.get_dummies(X_train, columns=cat_cols, drop_first=True, dtype=int)
# X_test  = pd.get_dummies(X_test,  columns=cat_cols, drop_first=True, dtype=int)
# X_test  = X_test.reindex(columns=X_train.columns, fill_value=0)

# 3. Standardization: fit the scaler on the training split only, then apply
# that same fitted transform to both splits (keeps the test set out of the
# scaling statistics).
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# 4. Dense hyper-parameter grid, evaluated with 5-fold CV scored by ROC-AUC.
# class_weight='balanced' compensates for the minority positive class.
search_space = {
    'n_estimators': [400, 600, 800],
    'max_depth': [8, 10, 12, None],
    'min_samples_split': [2, 5, 10],
    'min_samples_leaf': [1, 2, 4],
    'max_features': ['sqrt', 'log2', 0.3],
    'criterion': ['gini', 'entropy'],
    'random_state': [42],
}

grid = GridSearchCV(
    RandomForestClassifier(class_weight='balanced'),
    search_space,
    scoring='roc_auc',
    cv=5,
    n_jobs=-1,
    verbose=1,
)
grid.fit(X_train, y_train)

# 5. Pick an operating threshold that maximizes Youden's J statistic
# (tpr - fpr) on the ROC curve. This changes the hard predictions (and hence
# accuracy / the classification report), NOT the AUC — AUC is computed from
# the probabilities and is threshold-free.
# NOTE(review): selecting the threshold on the TEST set leaks test
# information into the reported accuracy; use a held-out validation split for
# an unbiased estimate — TODO confirm with the data owner.
best_model = grid.best_estimator_
y_prob = best_model.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob)
best_thr = thresholds[np.argmax(tpr - fpr)]
y_pred = (y_prob >= best_thr).astype(int)

print(classification_report(y_test, y_pred))
print("准确率 :", accuracy_score(y_test, y_pred))
print("AUC    :", roc_auc_score(y_test, y_prob))

# 6. Persist the best model; create the target directory first so
# joblib.dump does not fail with FileNotFoundError on a fresh checkout.
os.makedirs('../model', exist_ok=True)
joblib.dump(best_model, '../model/rf_gridcv_best.pkl')

