# src/train.py
"""
主要的训练模块的代码
    步骤:
        01- 创建训练的类
            初始化日志对象, 加载预处理的数据
        02- 数据分析
        03- 模型训练
"""
import joblib
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold

# 导入日志模块
from utils.log import Logger
# 导入数据库模块
from utils.database import DatabaseConnection
# 导入Redis客户端
from utils.redis_client import RedisClient
# 导入配置管理
from utils.config import ConfigManager
# 导入分布式配置
from utils.distributed_config import DISTRIBUTED_CONFIG

from src.distributed_grid_search import DistributedGridSearchOptimizer
# 导入时间模块
import datetime

# 导入模型
from lightgbm import LGBMClassifier, train as lgb_train
import lightgbm as lgb
# 导入预处理数据模块
from utils.common import data_preprocessing
# 绘图
import matplotlib.pyplot as plt

plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15
# 警告
import warnings

warnings.filterwarnings("ignore")
# 导入pandas模块
import pandas as pd


class TalentAttritionModel:
    def __init__(self):
        """Initialize configuration, logging, Redis and DB access, then load the training data."""
        # The configuration manager drives every other component.
        self.config_manager = ConfigManager()

        # Timestamped log file name, e.g. train_20240101120000.
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.logger = Logger(root_path="../", log_name=f"train_{timestamp}").get_logger()

        # Database + Redis wiring: queries go through the DB connection,
        # which uses the Redis client as a result cache.
        self.db_config = self.config_manager.get_database_config()
        self.redis_client = RedisClient(self.config_manager.get_redis_config())
        self.db_connection = DatabaseConnection(self.db_config, self.redis_client)

        # Eagerly load the training data once at construction time.
        self.data_source = self.load_data_from_db()

    def load_data_from_db(self):
        """
        Load the training data set from the database.

        Results are served through a Redis cache (key ``training_data``,
        TTL one hour). On failure the error is logged and re-raised.

        :return: the query result rows.
        """
        query = "SELECT * FROM training_data"
        try:
            # execute_query consults the Redis cache before hitting the DB.
            rows = self.db_connection.execute_query(
                query, cache_key="training_data", cache_expire=3600
            )
            self.logger.info(f'从数据库成功加载 {len(rows)} 条训练数据')
            return rows
        except Exception as e:
            self.logger.error(f'从数据库加载数据失败: {str(e)}')
            raise

    # NOTE: leftover change-log comments (translated): the feature_engineering
    # method below was added to / modified in src/train.py in place.
    def feature_engineering(self):
        """
        特征工程
        :return: 特征数据, 标签数据, 特征列名
        """
        # 尝试从Redis缓存获取特征工程结果
        cached_features = self.redis_client.get("training_features")
        if cached_features is not None:
            self.logger.info("从Redis缓存获取特征工程结果")
            return cached_features

        data = self.data_source.copy()

        # 删除无用特征（先检查列是否存在）
        columns_to_drop = ['id', 'created_at']
        # 检查employee_number列是否存在（考虑大小写不同的情况）
        if 'employee_number' in data.columns:
            columns_to_drop.append('employee_number')
        elif 'EmployeeNumber' in data.columns:
            columns_to_drop.append('EmployeeNumber')

        # 只删除存在的列
        existing_columns = [col for col in columns_to_drop if col in data.columns]
        if existing_columns:
            data = data.drop(existing_columns, axis=1)
            self.logger.info(f"删除了列: {existing_columns}")

        # 分离特征和标签（处理列名大小写问题）
        attrition_column = None
        for col in data.columns:
            if col.lower() == 'attrition':
                attrition_column = col
                break

        if attrition_column is None:
            self.logger.error(f"数据中未找到Attrition列，现有列名: {list(data.columns)}")
            raise KeyError("未找到Attrition列")

        y = data[attrition_column]
        X = data.drop(attrition_column, axis=1)

        # 特征工程：创建新特征
        self.logger.info("创建新特征...")

        # 年龄相关特征
        if 'Age' in X.columns and 'TotalWorkingYears' in X.columns:
            X['AgeToExperienceRatio'] = X['Age'] / (X['TotalWorkingYears'] + 1)
            X['YearsPerAge'] = X['TotalWorkingYears'] / (X['Age'] + 1)

        # 收入相关特征
        if 'MonthlyIncome' in X.columns and 'Age' in X.columns:
            X['IncomeToAgeRatio'] = X['MonthlyIncome'] / (X['Age'] + 1)

        if 'MonthlyIncome' in X.columns and 'TotalWorkingYears' in X.columns:
            X['IncomePerYearExperience'] = X['MonthlyIncome'] / (X['TotalWorkingYears'] + 1)

        # 工作稳定性特征
        if 'YearsAtCompany' in X.columns and 'YearsInCurrentRole' in X.columns:
            X['RoleStability'] = X['YearsInCurrentRole'] / (X['YearsAtCompany'] + 1)

        if 'NumCompaniesWorked' in X.columns and 'TotalWorkingYears' in X.columns:
            X['CompanyHoppingRate'] = X['NumCompaniesWorked'] / (X['TotalWorkingYears'] + 1)

        # 培训与工作年限比
        if 'TrainingTimesLastYear' in X.columns and 'TotalWorkingYears' in X.columns:
            X['TrainingFrequency'] = X['TrainingTimesLastYear'] / (X['TotalWorkingYears'] + 1)

        self.logger.info(f"创建了 {len(X.columns) - (len(data.columns) - 1)} 个新特征")

        # 对类别特征进行独热编码
        categorical_features = ['business_travel', 'department', 'education_field', 'gender', 'job_role',
                                'marital_status', 'over_time']
        # 处理类别特征列名大小写问题
        actual_categorical_features = []
        for feature in categorical_features:
            for col in X.columns:
                if col.lower() == feature.lower():
                    actual_categorical_features.append(col)
                    break

        if actual_categorical_features:
            self.logger.info(f"对以下分类特征进行独热编码: {actual_categorical_features}")
            X = pd.get_dummies(X, columns=actual_categorical_features, drop_first=True)
            self.logger.info(f"独热编码后特征数量: {X.shape[1]}")

        self.logger.info('特征工程完成')

        # 将特征工程结果存入Redis缓存
        features = (X, y, X.columns.tolist())
        self.redis_client.set("training_features", features, ex=3600)  # 缓存1小时

        # 特征数据, 标签数据, 特征列名
        return features

    def get_local_ip(self):
        """
        Resolve this machine's LAN IP address, preferring the 192.168.16.x subnet.

        Strategy (first success wins):
          1. UDP-connect trick: connect a datagram socket towards 192.168.16.1
             (no packet is sent) and read the local address the routing table
             selected.
          2. Resolve the hostname and scan the address list, preferring
             192.168.16.x, then any private address, then any non-loopback
             address, then loopback.
          3. Fall back to 127.0.0.1.

        :return: dotted-quad IP address string.
        """
        import socket
        try:
            # Method 1: connect() on a UDP socket only selects a route;
            # nothing is transmitted.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                # 192.168.16.1 is a typical gateway address for the target subnet.
                s.connect(("192.168.16.1", 1))
                ip = s.getsockname()[0]
            finally:
                # Always release the socket — the original code leaked it
                # whenever connect()/getsockname() raised.
                s.close()

            # Only accept the result outright if it is in the preferred subnet.
            if ip.startswith("192.168.16."):
                self.logger.info(f"通过UDP连接方法获取192.168.16.x网段IP: {ip}")
                return ip
            else:
                self.logger.warning(f"通过UDP连接方法获取的IP不是192.168.16.x网段: {ip}")
        except Exception as e:
            self.logger.warning(f"通过UDP连接方法获取IP失败: {e}")

        try:
            # Method 2: enumerate every address attached to the hostname.
            hostname = socket.gethostname()
            ip_list = socket.gethostbyname_ex(hostname)[2]

            # Preference 1: the 192.168.16.x subnet.
            target_ips = [ip for ip in ip_list if ip.startswith("192.168.16.") and not ip.startswith("127.")]
            if target_ips:
                selected_ip = target_ips[0]
                self.logger.info(f"通过主机名解析获取192.168.16.x网段IP: {selected_ip}")
                return selected_ip

            # Preference 2: any other private (RFC 1918) address.
            private_ips = [ip for ip in ip_list if self.is_private_ip(ip) and not ip.startswith("127.")]
            if private_ips:
                selected_ip = private_ips[0]
                self.logger.info(f"通过主机名解析获取局域网IP: {selected_ip}")
                return selected_ip

            # Preference 3: any non-loopback address.
            for ip in ip_list:
                if not ip.startswith("127."):
                    self.logger.info(f"通过主机名解析获取IP: {ip}")
                    return ip

            # Preference 4: loopback, if that is all we have.
            if ip_list:
                self.logger.info(f"通过主机名解析获取回环IP: {ip_list[0]}")
                return ip_list[0]
        except Exception as e:
            self.logger.warning(f"通过主机名解析获取IP失败: {e}")

        # Method 3: nothing worked — fall back to loopback.
        self.logger.warning("无法获取有效IP，使用默认IP: 127.0.0.1")
        return "127.0.0.1"

    def is_private_ip(self, ip):
        """
        Return True if *ip* (a dotted-quad string) lies in one of the RFC 1918
        private ranges: 10.0.0.0/8, 172.16.0.0/12 or 192.168.0.0/16.
        """
        if ip.startswith(("10.", "192.168.")):
            return True
        if not ip.startswith("172."):
            return False
        # 172.16.0.0/12 covers second octets 16 through 31 only.
        second_octet = int(ip.split(".")[1])
        return 16 <= second_octet <= 31

    def setup_distributed_training(self):
        """
        Build the LightGBM training parameter dict and decide whether this
        host is the master node.

        :return: tuple of (params dict, is_master flag)
        """
        # Node role: honour an explicit override from the config file,
        # otherwise compare our IP against the configured master IP.
        role = DISTRIBUTED_CONFIG.get("node_role", "auto")
        local_ip = self.get_local_ip()

        if role == "master":
            is_master = True
            self.logger.info("通过配置文件强制设置为master节点")
        elif role == "slave":
            is_master = False
            self.logger.info("通过配置文件强制设置为slave节点")
        else:
            # Auto-detect: master iff our IP matches the configured master IP.
            is_master = local_ip == DISTRIBUTED_CONFIG["master_ip"]
            self.logger.info(f"自动检测节点角色: 本地IP地址: {local_ip}, 是否为主节点: {is_master}")

        self.logger.info(f"节点配置 - 本地IP: {local_ip}, 角色: {'master' if is_master else 'slave'}")

        # Base (single-machine) LightGBM parameters.
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': 1,  # verbose training output
            'device_type': 'cpu',
        }

        # Distributed settings only apply when more than one machine is listed.
        if len(DISTRIBUTED_CONFIG["machines"]) > 1:
            self.logger.info("配置分布式训练参数")
            params['machines'] = ','.join(DISTRIBUTED_CONFIG["machines"])
            params['local_listen_port'] = DISTRIBUTED_CONFIG["local_listen_port"]
            params['num_machines'] = DISTRIBUTED_CONFIG["num_machines"]
            params['timeout'] = DISTRIBUTED_CONFIG["timeout"]
            params['tree_learner'] = 'data'  # alternatives: feature, voting

        return params, is_master

        # NOTE (translated leftover comment): a data-distribution check was
        # added inside the model_train method below.
    def model_train(self):
        """
        Train, evaluate and persist the attrition model.

        Pipeline:
            01- chronological split (70% train / 15% validation / 15% test)
            02- class-imbalance handling (ADASYN / SMOTETomek; falls back to
                balanced class weights when imbalanced-learn is unavailable)
            03- hyper-parameter search (bayesian / cv / grid)
            04- decision-threshold tuning on the validation set
            05- final training (distributed when data volume and machine
                configuration allow it, single-machine otherwise)
            06- evaluation on the held-out test set
            07- persist the model, metrics and metadata (disk, DB, Redis)
        :return: None
        """
        self.logger.info('================开始训练模型===================')
        # Load the engineered features (Redis-cached when available).
        X, y, feature_cols = self.feature_engineering()

        # 01- chronological split: first 70% train, 70%-85% validation,
        # 85%-100% test.
        train_index = int(len(X) * 0.7)
        val_index = int(len(X) * 0.85)
        X_train, X_val, X_test = X.iloc[:train_index, :], X.iloc[train_index:val_index, :], X.iloc[val_index:, :]
        y_train, y_val, y_test = y.iloc[:train_index], y.iloc[train_index:val_index], y.iloc[val_index:]

        # Log the class distribution of each split so imbalance is visible.
        self.logger.info(f"训练集类别分布: {y_train.value_counts()}")
        self.logger.info(f"验证集类别分布: {y_val.value_counts()}")
        self.logger.info(f"测试集类别分布: {y_test.value_counts()}")

        # Data-volume threshold above which distributed training is considered.
        distributed_threshold = self.config_manager.get_model_config().get('distributed_threshold', 100000)
        self.logger.info(f"分布式训练阈值: {distributed_threshold}, 当前数据量: {len(X_train)}")

        # 02- class-imbalance handling. class_weight_dict stays None unless we
        # fall back to weighting instead of resampling (fixes the fragile
        # "'class_weight_dict' in locals()" guards of the original).
        class_weight_dict = None
        try:
            from imblearn.combine import SMOTETomek
            from imblearn.over_sampling import ADASYN

            # Pick the sampler according to how skewed the classes are.
            class_distribution = y_train.value_counts()
            ratio = class_distribution.min() / class_distribution.max()

            if ratio < 0.1:  # severe imbalance
                self.logger.info("检测到严重类别不平衡，使用ADASYN采样")
                sampler = ADASYN(random_state=22)
            else:
                self.logger.info("使用SMOTETomek组合采样")
                sampler = SMOTETomek(random_state=22)

            X_train_resampled, y_train_resampled = sampler.fit_resample(X_train, y_train)

            self.logger.info(f"采样处理后训练集类别分布: {pd.Series(y_train_resampled).value_counts()}")
            self.logger.info(f"训练集样本数从 {len(X_train)} 变为 {len(X_train_resampled)}")

        except ImportError:
            # imbalanced-learn missing: keep the original data and compensate
            # with balanced class weights instead.
            self.logger.warning("未安装imbalanced-learn库，使用class_weight处理不平衡")
            X_train_resampled, y_train_resampled = X_train, y_train
            from sklearn.utils.class_weight import compute_class_weight
            classes = np.unique(y_train)
            class_weights = compute_class_weight('balanced', classes=classes, y=y_train)
            class_weight_dict = dict(zip(classes, class_weights))
        except Exception as e:
            # Best-effort: train on the raw, unbalanced data.
            self.logger.error(f"采样处理失败: {str(e)}，使用原始数据训练")
            X_train_resampled, y_train_resampled = X_train, y_train

        # 03- hyper-parameter search space (deliberately regularised).
        param_distributions = {
            'n_estimators': [100, 200, 300, 400],
            'max_depth': [3, 4, 5, 6],
            'learning_rate': [0.05, 0.1, 0.15],
            'num_leaves': [15, 31, 63],
            'min_child_samples': [20, 30, 50],
            'subsample': [0.7, 0.8, 0.9],
            'colsample_bytree': [0.7, 0.8, 0.9],
            'reg_alpha': [0.1, 0.5, 1.0],  # L1 regularisation
            'reg_lambda': [0.1, 0.5, 1.0]  # L2 regularisation
        }

        # Search configuration (method, iteration budget, parallelism).
        grid_search_method = self.config_manager.get_model_config().get('grid_search_method', 'bayesian')
        grid_search_n_iter = self.config_manager.get_model_config().get('grid_search_n_iter', 30)
        grid_search_n_jobs = self.config_manager.get_model_config().get('grid_search_n_jobs', -1)

        self.logger.info(f"使用 {grid_search_method} 方法进行超参数优化")

        optimizer = DistributedGridSearchOptimizer(
            search_method=grid_search_method,
            n_jobs=grid_search_n_jobs,
            logger=self.logger
        )

        # Run the search; the optimizer takes different keyword sets per method.
        if grid_search_method == 'bayesian':
            search_result = optimizer.optimize(
                X=X_train_resampled,
                y=y_train_resampled,
                X_val=X_val,
                y_val=y_val,
                param_grid=param_distributions,
                n_iter=grid_search_n_iter,
                random_state=22
            )
        elif grid_search_method == 'cv':
            search_result = optimizer.optimize(
                X=X_train_resampled,
                y=y_train_resampled,
                X_val=X_val,
                y_val=y_val,
                param_grid=param_distributions,
                cv_folds=3
            )
        else:  # plain grid search
            search_result = optimizer.optimize(
                X=X_train_resampled,
                y=y_train_resampled,
                X_val=X_val,
                y_val=y_val,
                param_grid=param_distributions
            )

        if search_result:
            best_params = search_result['params']
            # The cv method reports a mean score, the others a single score.
            if grid_search_method == 'cv':
                best_score = search_result['mean_f1_score']
            else:
                best_score = search_result['f1_score']
            self.logger.info(f"网格搜索完成，最佳参数: {best_params}")
            self.logger.info(f"最佳F1分数: {best_score:.4f}")
        else:
            # Search failed — fall back to conservative defaults.
            self.logger.error("网格搜索失败，使用默认参数")
            best_params = {
                'n_estimators': 500,
                'max_depth': 7,
                'learning_rate': 0.05,
                'num_leaves': 63,
                'min_child_samples': 50,
                'subsample': 0.9,
                'colsample_bytree': 0.9,
                'reg_alpha': 0.1,
                'reg_lambda': 0.1
            }
            best_score = 0.0

        # 04- fit a model with the best parameters to tune the decision
        # threshold on the validation set.
        best_model = lgb.LGBMClassifier(
            **best_params,
            random_state=22,
            device_type='cpu',
            class_weight=class_weight_dict
        )

        best_model.fit(
            X_train_resampled, y_train_resampled,
            eval_set=[(X_val, y_val)],
            callbacks=[lgb.early_stopping(20), lgb.log_evaluation(50)]
        )

        y_val_proba = best_model.predict_proba(X_val)[:, 1]
        from sklearn.metrics import precision_recall_curve

        # Pick the probability threshold that maximises F1 on validation.
        precisions, recalls, thresholds = precision_recall_curve(y_val, y_val_proba)
        f1_scores = 2 * (precisions * recalls) / (precisions + recalls + 1e-8)
        best_threshold = thresholds[np.argmax(f1_scores)]
        best_f1_on_val = np.max(f1_scores)

        self.logger.info(f"验证集上最佳阈值: {best_threshold:.4f}, 对应F1分数: {best_f1_on_val:.4f}")

        # 05- final training. Distributed only when the data volume exceeds the
        # threshold AND more than one machine is configured.
        if len(X_train_resampled) >= distributed_threshold and DISTRIBUTED_CONFIG["num_machines"] > 1:
            self.logger.info("启用分布式训练")
            # The native LightGBM API is required for distributed training.
            params, is_master = self.setup_distributed_training()

            # Overlay the best parameters found by the search.
            params.update(best_params)

            self.logger.info(f"训练参数: {params}")

            train_data = lgb.Dataset(X_train_resampled, label=y_train_resampled)
            val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

            valid_sets = [val_data]
            valid_names = ['eval']

            try:
                callbacks = [lgb.early_stopping(20), lgb.log_evaluation(50)]

                model = lgb_train(
                    params,
                    train_data,
                    num_boost_round=1000,
                    valid_sets=valid_sets,
                    valid_names=valid_names,
                    callbacks=callbacks
                )
                self.logger.info("分布式训练完成")
            except Exception as e:
                self.logger.error(f"分布式训练失败: {str(e)}")
                raise
        else:
            # Single-machine training path.
            self.logger.info("使用单机训练")
            if len(X_train_resampled) < distributed_threshold:
                self.logger.info(f"数据量({len(X_train_resampled)})小于阈值({distributed_threshold})，使用单机训练")
            elif DISTRIBUTED_CONFIG["num_machines"] <= 1:
                self.logger.info("未配置多台机器，使用单机训练")

            lgb_model = LGBMClassifier(
                **best_params,
                random_state=22,
                device_type='cpu',
                class_weight=class_weight_dict
            )

            lgb_model.fit(
                X_train_resampled,
                y_train_resampled,
                eval_set=[(X_val, y_val)],
                callbacks=[lgb.early_stopping(20), lgb.log_evaluation(50)]
            )
            model = lgb_model

        # 06- evaluate on the held-out test set.
        # BUG FIX: lgb_train returns a Booster, which has no predict_proba();
        # for a binary objective Booster.predict() already yields positive-class
        # probabilities, so branch on the available API instead of crashing in
        # the distributed path.
        if hasattr(model, 'predict_proba'):
            y_pred_proba = model.predict_proba(X_test)[:, 1]
        else:
            y_pred_proba = model.predict(X_test)
        # Apply the validation-tuned threshold.
        y_pred = (y_pred_proba >= best_threshold).astype(int)

        accuracy = accuracy_score(y_test, y_pred)
        f1 = f1_score(y_test, y_pred)
        auc = roc_auc_score(y_test, y_pred_proba)

        self.logger.info(f"使用优化阈值 {best_threshold:.4f} 后的评估指标:")
        self.logger.info(f"评估指标: Accuracy={accuracy:.4f}, F1 Score={f1:.4f}, AUC={auc:.4f}")

        # Detailed per-class report and confusion matrix.
        from sklearn.metrics import classification_report, confusion_matrix
        self.logger.info("分类报告:\n" + classification_report(y_test, y_pred))
        self.logger.info("混淆矩阵:\n" + str(confusion_matrix(y_test, y_pred)))

        # Persist the evaluation metrics to the database.
        self.save_model_metrics('LightGBM_Optimized', f1, 'F1_Score')
        self.save_model_metrics('LightGBM_Optimized', accuracy, 'Accuracy')
        self.save_model_metrics('LightGBM_Optimized', auc, 'AUC')

        # 07- persist the model, the tuned threshold and metadata.
        model_path = self.config_manager.get_model_config().get('model_path', '../model/lightgbm_optimized_model.pkl')
        self.logger.info(f"保存模型到: {model_path}")
        joblib.dump({
            'model': model,
            'threshold': best_threshold,
            'feature_cols': feature_cols,
            'best_params': best_params
        }, model_path)

        # Publish a summary of the latest model to Redis (24h TTL).
        model_info = {
            "model_name": "LightGBM_Optimized",
            "accuracy": accuracy,
            "f1_score": f1,
            "auc": auc,
            "threshold": best_threshold,
            "timestamp": datetime.datetime.now().isoformat(),
            "model_path": model_path,
            "best_params": best_params,
            # X_train_resampled is always bound by this point, so compare
            # lengths directly (the original's locals() check was redundant).
            "sampling_applied": len(X_train_resampled) != len(X_train),
            "grid_search_method": grid_search_method
        }
        self.redis_client.set_json("latest_model_info", model_info, ex=86400)

        # Log the most important features of the final model.
        feature_importance = self.analyze_feature_importance(model, feature_cols)
        self.logger.info('===========结束训练模型===========')

    # 添加特征重要性分析
    def analyze_feature_importance(self, model, feature_cols):
        """
        Log and return the model's feature importances.

        :param model: fitted model; anything without ``feature_importances_``
                      (e.g. a raw Booster) yields None.
        :param feature_cols: column names aligned with the importance vector.
        :return: DataFrame sorted by descending importance, or None.
        """
        if not hasattr(model, 'feature_importances_'):
            return None

        ranking = pd.DataFrame({
            'feature': feature_cols,
            'importance': model.feature_importances_
        }).sort_values('importance', ascending=False)

        self.logger.info("Top 10 重要特征:")
        for _, row in ranking.head(10).iterrows():
            self.logger.info(f"  {row['feature']}: {row['importance']}")

        return ranking


    # 添加交叉验证评估
    # from sklearn.model_selection import StratifiedKFold
    # from sklearn.metrics import make_scorer
    def cross_validate_model(self, X, y, model_params, cv_folds=5):
        """
        Evaluate a parameter set with stratified k-fold cross-validation.

        :param X: feature DataFrame
        :param y: label Series
        :param model_params: keyword arguments for LGBMClassifier
        :param cv_folds: number of stratified folds (default 5)
        :return: tuple of (mean F1 score, F1 standard deviation)
        """
        splitter = StratifiedKFold(n_splits=cv_folds, shuffle=True, random_state=22)
        fold_scores = []

        for fold, (train_idx, val_idx) in enumerate(splitter.split(X, y)):
            # Train on this fold's training partition ...
            clf = lgb.LGBMClassifier(**model_params, random_state=22)
            clf.fit(X.iloc[train_idx], y.iloc[train_idx])

            # ... and score F1 on its held-out partition.
            score = f1_score(y.iloc[val_idx], clf.predict(X.iloc[val_idx]))
            fold_scores.append(score)

            self.logger.info(f"交叉验证 Fold {fold + 1}: F1 Score = {score:.4f}")

        avg_f1 = np.mean(fold_scores)
        std_f1 = np.std(fold_scores)
        self.logger.info(f"交叉验证平均F1 Score: {avg_f1:.4f} (±{std_f1:.4f})")

        return avg_f1, std_f1

    def save_model_metrics(self, model_name, metric_value, metric_name):
        """
        Persist one model evaluation metric to the model_metrics table.

        Failures are logged but never propagated, so a metrics-store outage
        cannot abort a training run.

        :param model_name: identifier of the trained model
        :param metric_value: numeric metric value
        :param metric_name: metric label (e.g. 'F1_Score')
        """
        query = """
            INSERT INTO model_metrics (model_name, metric_name, metric_value, timestamp)
            VALUES (%s, %s, %s, NOW())
            """
        try:
            self.db_connection.execute_update(query, (model_name, metric_name, metric_value))
            self.logger.info(f'模型评估指标已保存到数据库: {metric_name}={metric_value}')
        except Exception as e:
            # Best-effort persistence: log the failure and carry on.
            self.logger.error(f'保存模型评估指标到数据库失败: {str(e)}')


if __name__ == '__main__':
    tam = TalentAttritionModel()
    # model_train() performs feature engineering internally (and the result is
    # Redis-cached), so the separate feature_engineering() call was redundant.
    tam.model_train()
