"""
主要的训练模块的代码
    步骤:
        01- 创建训练的类
            初始化日志对象, 加载预处理的数据
        02- 数据分析
        03- 模型训练
"""
import joblib
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit

# 导入日志模块
from utils.log import Logger
# 导入数据库模块
from utils.database import DatabaseConnection
# 导入Redis客户端
from utils.redis_client import RedisClient
# 导入配置管理
from utils.config import ConfigManager
# 导入分布式配置
from utils.distributed_config import DISTRIBUTED_CONFIG
# 导入时间模块
import datetime

# 导入模型
from lightgbm import LGBMClassifier, train as lgb_train
import lightgbm as lgb
# 导入预处理数据模块
from utils.common import data_preprocessing
# 绘图
import matplotlib.pyplot as plt

plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15
# 警告
import warnings

warnings.filterwarnings("ignore")
# 导入pandas模块
import pandas as pd


class TalentAttritionModel:
    """End-to-end trainer for the employee-attrition LightGBM model.

    Pipeline:
        01 - load training data from the database (Redis-cached)
        02 - feature engineering (drop id columns, one-hot encode categoricals)
        03 - grid-search hyper-parameters with time-series cross-validation
        04 - train single-machine (sklearn API) or distributed (native API)
        05 - evaluate on a hold-out split; persist metrics, model and metadata
    """

    def __init__(self):
        # Central configuration manager (database / Redis / model settings).
        self.config_manager = ConfigManager()

        # Timestamped logger so each training run gets its own log file.
        train_log_name = "train_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.logger = Logger(root_path="../", log_name=train_log_name).get_logger()

        # Database configuration.
        self.db_config = self.config_manager.get_database_config()

        # Redis client, used to cache query results and engineered features.
        redis_config = self.config_manager.get_redis_config()
        self.redis_client = RedisClient(redis_config)

        # Database connection object (queries are cached through Redis).
        self.db_connection = DatabaseConnection(self.db_config, self.redis_client)

        # Load the raw training data once, up front.
        self.data_source = self.load_data_from_db()

    def load_data_from_db(self):
        """Load the training data from the database.

        The result is cached in Redis under the key ``training_data`` with a
        one-hour TTL.

        :return: the query result (tabular training data)
        :raises Exception: re-raises any database error after logging it
        """
        try:
            query = "SELECT * FROM training_data"
            # Cache key "training_data", TTL one hour.
            data = self.db_connection.execute_query(query, cache_key="training_data", cache_expire=3600)

            self.logger.info(f'从数据库成功加载 {len(data)} 条训练数据')
            return data
        except Exception as e:
            self.logger.error(f'从数据库加载数据失败: {str(e)}')
            raise

    def feature_engineering(self):
        """Build the model features from the raw data.

        :return: tuple of (feature matrix X, label series y, feature column names)
        :raises KeyError: when no (case-insensitive) "Attrition" column exists
        """
        # Reuse a previously computed result if one is cached in Redis.
        cached_features = self.redis_client.get("training_features")
        if cached_features is not None:
            self.logger.info("从Redis缓存获取特征工程结果")
            return cached_features

        data = self.data_source.copy()

        # Columns with no predictive signal; only the ones actually present
        # are dropped.
        columns_to_drop = ['id', 'created_at']
        # The employee-number column may be snake_case or CamelCase.
        if 'employee_number' in data.columns:
            columns_to_drop.append('employee_number')
        elif 'EmployeeNumber' in data.columns:
            columns_to_drop.append('EmployeeNumber')

        existing_columns = [col for col in columns_to_drop if col in data.columns]
        if existing_columns:
            data = data.drop(existing_columns, axis=1)
            self.logger.info(f"删除了列: {existing_columns}")

        # Locate the label column case-insensitively.
        attrition_column = next((col for col in data.columns if col.lower() == 'attrition'), None)

        if attrition_column is None:
            self.logger.error(f"数据中未找到Attrition列，现有列名: {list(data.columns)}")
            raise KeyError("未找到Attrition列")

        y = data[attrition_column]
        X = data.drop(attrition_column, axis=1)

        # One-hot encode the categorical features, matching column names
        # case-insensitively.
        categorical_features = ['business_travel', 'department', 'education_field', 'gender', 'job_role',
                                'marital_status', 'over_time']
        actual_categorical_features = []
        for feature in categorical_features:
            for col in X.columns:
                if col.lower() == feature.lower():
                    actual_categorical_features.append(col)
                    break

        if actual_categorical_features:
            X = pd.get_dummies(X, columns=actual_categorical_features)

        self.logger.info('特征工程完成')

        # Cache the engineered features for one hour.
        features = (X, y, X.columns.tolist())
        self.redis_client.set("training_features", features, ex=3600)

        # (features, labels, feature column names)
        return features

    def get_local_ip(self):
        """Best-effort lookup of the local LAN IP address.

        Prefers addresses on the 192.168.16.x subnet, then any private
        address, then any non-loopback address, and finally 127.0.0.1.
        """
        import socket

        # Method 1: connect a UDP socket towards a private gateway address.
        # No packet is actually sent, but the OS performs the routing-table
        # lookup, after which getsockname() reveals the outgoing interface IP.
        s = None
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # 192.168.16.1 is a common gateway address on the target subnet.
            s.connect(("192.168.16.1", 1))
            ip = s.getsockname()[0]

            if ip.startswith("192.168.16."):
                self.logger.info(f"通过UDP连接方法获取192.168.16.x网段IP: {ip}")
                return ip
            else:
                self.logger.warning(f"通过UDP连接方法获取的IP不是192.168.16.x网段: {ip}")
        except Exception as e:
            self.logger.warning(f"通过UDP连接方法获取IP失败: {e}")
        finally:
            # Bug fix: the original leaked the socket when connect() raised;
            # close it on every path.
            if s is not None:
                s.close()

        try:
            # Method 2: resolve the hostname and inspect every bound address.
            hostname = socket.gethostname()
            ip_list = socket.gethostbyname_ex(hostname)[2]

            # Prefer the 192.168.16.x subnet...
            target_ips = [ip for ip in ip_list if ip.startswith("192.168.16.") and not ip.startswith("127.")]
            if target_ips:
                selected_ip = target_ips[0]
                self.logger.info(f"通过主机名解析获取192.168.16.x网段IP: {selected_ip}")
                return selected_ip

            # ...then any other private (LAN) address...
            private_ips = [ip for ip in ip_list if self.is_private_ip(ip) and not ip.startswith("127.")]
            if private_ips:
                selected_ip = private_ips[0]
                self.logger.info(f"通过主机名解析获取局域网IP: {selected_ip}")
                return selected_ip

            # ...then any non-loopback address...
            for ip in ip_list:
                if not ip.startswith("127."):
                    self.logger.info(f"通过主机名解析获取IP: {ip}")
                    return ip

            # ...and finally loopback, if that is all the host has.
            if ip_list:
                self.logger.info(f"通过主机名解析获取回环IP: {ip_list[0]}")
                return ip_list[0]
        except Exception as e:
            self.logger.warning(f"通过主机名解析获取IP失败: {e}")

        # Method 3: last-resort default.
        self.logger.warning("无法获取有效IP，使用默认IP: 127.0.0.1")
        return "127.0.0.1"

    def is_private_ip(self, ip):
        """Return True if *ip* falls in an RFC 1918 private (LAN) range."""
        if ip.startswith("10."):
            return True
        if ip.startswith("172."):
            # 172.16.0.0/12 covers second octets 16-31 only.
            try:
                second_octet = int(ip.split(".")[1])
            except ValueError:
                # Robustness fix: a malformed address such as "172.x" no
                # longer crashes the caller; treat it as non-private.
                return False
            return 16 <= second_octet <= 31
        if ip.startswith("192.168."):
            return True
        return False

    def setup_distributed_training(self):
        """Build the native-API parameter dict and determine the node role.

        :return: tuple of (params dict for ``lgb.train``, is_master flag)
        """
        # Node role from config: "master", "slave" or "auto".
        node_role = DISTRIBUTED_CONFIG.get("node_role", "auto")

        # Local IP, used for auto role detection and for logging.
        local_ip = self.get_local_ip()

        if node_role == "master":
            is_master = True
            self.logger.info("通过配置文件强制设置为master节点")
        elif node_role == "slave":
            is_master = False
            self.logger.info("通过配置文件强制设置为slave节点")
        else:  # auto: master iff our IP matches the configured master IP
            is_master = local_ip == DISTRIBUTED_CONFIG["master_ip"]
            self.logger.info(f"自动检测节点角色: 本地IP地址: {local_ip}, 是否为主节点: {is_master}")

        self.logger.info(f"节点配置 - 本地IP: {local_ip}, 角色: {'master' if is_master else 'slave'}")

        # Base parameters for the native LightGBM training API.
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': 1,  # verbose training output
            'device_type': 'cpu'
        }

        # Only add the network parameters when more than one machine is
        # configured.
        if len(DISTRIBUTED_CONFIG["machines"]) > 1:
            self.logger.info("配置分布式训练参数")
            params.update({
                'machines': ','.join(DISTRIBUTED_CONFIG["machines"]),
                'local_listen_port': DISTRIBUTED_CONFIG["local_listen_port"],
                'num_machines': DISTRIBUTED_CONFIG["num_machines"],
                'timeout': DISTRIBUTED_CONFIG["timeout"],
                'tree_learner': 'data'  # alternatives: feature, voting
            })

        return params, is_master

    @staticmethod
    def _predict_labels_and_proba(model, X_test):
        """Return (hard labels, positive-class probabilities) for *model*.

        Bug fix: a native ``lgb.Booster`` (what distributed training via
        ``lgb.train`` returns) has no ``predict_proba``, and its ``predict``
        already yields probabilities for a binary objective — the original
        code crashed on that path. The two APIs are handled explicitly here.
        """
        if isinstance(model, lgb.Booster):
            y_pred_proba = model.predict(X_test)
            # Threshold probabilities at 0.5 to obtain hard labels.
            y_pred = (y_pred_proba >= 0.5).astype(int)
        else:
            y_pred = model.predict(X_test)
            y_pred_proba = model.predict_proba(X_test)[:, 1]
        return y_pred, y_pred_proba

    def model_train(self):
        """Train, evaluate and persist the attrition model.

        01 - chronological split (70% train / 15% val / 15% test)
        02 - balanced class weights for the imbalanced label
        03 - grid-search hyper-parameters (time-series CV, F1 objective)
        04 - train distributed (native API) or single-machine (sklearn API)
        05 - evaluate on the test split
        06 - persist metrics (DB), the model (disk) and metadata (Redis)
        :return: None
        """
        self.logger.info('================开始训练模型===================')
        # Load the engineered features.
        X, y, feature_cols = self.feature_engineering()

        # 01 - chronological split: first 70% train, 70-85% val, 85-100% test.
        train_index = int(len(X) * 0.7)
        val_index = int(len(X) * 0.85)
        X_train, X_val, X_test = X.iloc[:train_index, :], X.iloc[train_index:val_index, :], X.iloc[val_index:, :]
        y_train, y_val, y_test = y.iloc[:train_index], y.iloc[train_index:val_index], y.iloc[val_index:]

        # Log the class distribution so imbalance is visible in the run log.
        self.logger.info(f"训练集类别分布: {y_train.value_counts()}")

        # Data-volume threshold above which distributed training is considered.
        distributed_threshold = self.config_manager.get_model_config().get('distributed_threshold', 100000)
        self.logger.info(f"分布式训练阈值: {distributed_threshold}, 当前数据量: {len(X_train)}")

        # 02 - balanced class weights to counter label imbalance.
        from sklearn.utils.class_weight import compute_class_weight
        import numpy as np
        classes = np.unique(y_train)
        class_weights = compute_class_weight('balanced', classes=classes, y=y_train)
        class_weight_dict = dict(zip(classes, class_weights))
        self.logger.info(f"类别权重: {class_weight_dict}")

        # 03 - hyper-parameter grid search.
        param_grid = {
            'n_estimators': [100, 300, 500],
            'max_depth': [3, 5, 7],
            'learning_rate': [0.01, 0.05, 0.1],
            'num_leaves': [15, 31, 63],
            'min_child_samples': [20, 50, 100]
        }

        # Time-series splits keep validation folds strictly after training
        # folds, matching the chronological hold-out above.
        tscv = TimeSeriesSplit(n_splits=3)

        base_model = LGBMClassifier(
            random_state=22,
            class_weight=class_weight_dict,
            device_type='cpu'
        )

        # Optimise for F1-score, which is more informative than accuracy on
        # an imbalanced label.
        self.logger.info("开始网格搜索优化超参数...")
        grid_search = GridSearchCV(
            estimator=base_model,
            param_grid=param_grid,
            cv=tscv,
            scoring='f1',
            n_jobs=-1,
            verbose=1
        )

        grid_search.fit(X_train, y_train)

        self.logger.info(f"最佳参数: {grid_search.best_params_}")
        self.logger.info(f"最佳F1-score: {grid_search.best_score_:.4f}")

        best_params = grid_search.best_params_

        # 04 - training. Distributed only when the data volume exceeds the
        # threshold AND multiple machines are configured.
        if len(X_train) >= distributed_threshold and DISTRIBUTED_CONFIG["num_machines"] > 1:
            self.logger.info("启用分布式训练")
            # Native LightGBM interface for distributed training.
            params, is_master = self.setup_distributed_training()

            # Apply the tuned parameters. Bug fixes vs the original:
            #   - 'class_weight' is a sklearn-wrapper-only option that the
            #     native API ignores; imbalance is handled through per-row
            #     Dataset weights below instead;
            #   - 'n_estimators' inside params conflicts with the
            #     num_boost_round argument, so the tuned value is passed as
            #     num_boost_round directly.
            params.update({
                'max_depth': best_params['max_depth'],
                'learning_rate': best_params['learning_rate'],
                'num_leaves': best_params['num_leaves'],
                'min_child_samples': best_params['min_child_samples'],
            })

            self.logger.info(f"训练参数: {params}")

            # Per-row weights implementing the balanced class weighting.
            train_weights = y_train.map(class_weight_dict)
            train_data = lgb.Dataset(X_train, label=y_train, weight=train_weights)
            val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

            try:
                # Early stopping + periodic eval logging via callbacks
                # (the modern replacement for early_stopping_rounds).
                callbacks = [lgb.early_stopping(20), lgb.log_evaluation(50)]

                model = lgb_train(
                    params,
                    train_data,
                    num_boost_round=best_params['n_estimators'],
                    valid_sets=[val_data],
                    valid_names=['eval'],
                    callbacks=callbacks
                )
                self.logger.info("分布式训练完成")
            except Exception as e:
                self.logger.error(f"分布式训练失败: {str(e)}")
                raise
        else:
            # Single-machine training via the sklearn wrapper.
            self.logger.info("使用单机训练")
            if len(X_train) < distributed_threshold:
                self.logger.info(f"数据量({len(X_train)})小于阈值({distributed_threshold})，使用单机训练")
            elif DISTRIBUTED_CONFIG["num_machines"] <= 1:
                self.logger.info("未配置多台机器，使用单机训练")

            lgb_model = LGBMClassifier(
                n_estimators=best_params['n_estimators'],
                max_depth=best_params['max_depth'],
                learning_rate=best_params['learning_rate'],
                num_leaves=best_params['num_leaves'],
                min_child_samples=best_params['min_child_samples'],
                random_state=22,
                class_weight=class_weight_dict,
                device_type='cpu'
            )

            # Callback-based early stopping on the validation split.
            lgb_model.fit(
                X_train,
                y_train,
                eval_set=[(X_val, y_val)],
                callbacks=[lgb.early_stopping(20), lgb.log_evaluation(50)]
            )
            model = lgb_model

        # 05 - evaluation on the held-out test split (handles both the
        # sklearn wrapper and the native Booster).
        y_pred, y_pred_proba = self._predict_labels_and_proba(model, X_test)

        accuracy = accuracy_score(y_test, y_pred)
        f1 = f1_score(y_test, y_pred)
        auc = roc_auc_score(y_test, y_pred_proba)

        self.logger.info(f"评估指标: Accuracy={accuracy:.4f}, F1 Score={f1:.4f}, AUC={auc:.4f}")

        # Detailed per-class report and confusion matrix.
        from sklearn.metrics import classification_report, confusion_matrix
        self.logger.info("分类报告:\n" + classification_report(y_test, y_pred))
        self.logger.info("混淆矩阵:\n" + str(confusion_matrix(y_test, y_pred)))

        # Persist the evaluation metrics to the database.
        self.save_model_metrics('LightGBM', f1, 'F1_Score')
        self.save_model_metrics('LightGBM', accuracy, 'Accuracy')
        self.save_model_metrics('LightGBM', auc, 'AUC')

        # 06 - serialize the trained model to disk.
        model_path = self.config_manager.get_model_config().get('model_path', '../model/lightgbm_model.pkl')
        self.logger.info(f"保存模型到: {model_path}")
        joblib.dump(model, model_path)

        # Publish run metadata to Redis for downstream consumers.
        # (The original `'grid_search' in locals()` guard was dead code:
        # grid_search is always defined by this point.)
        model_info = {
            "model_name": "LightGBM",
            "accuracy": accuracy,
            "f1_score": f1,
            "auc": auc,
            "timestamp": datetime.datetime.now().isoformat(),
            "model_path": model_path,
            "best_params": best_params
        }
        self.redis_client.set_json("latest_model_info", model_info, ex=86400)  # 24h TTL

        self.logger.info('===========结束训练模型===========')

    def save_model_metrics(self, model_name, metric_value, metric_name):
        """Insert one evaluation metric row into the model_metrics table.

        Failures are logged but deliberately not re-raised: metric
        persistence is best-effort and must not abort a finished training run.
        """
        try:
            query = """
            INSERT INTO model_metrics (model_name, metric_name, metric_value, timestamp)
            VALUES (%s, %s, %s, NOW())
            """
            self.db_connection.execute_update(query, (model_name, metric_name, metric_value))
            self.logger.info(f'模型评估指标已保存到数据库: {metric_name}={metric_value}')
        except Exception as e:
            self.logger.error(f'保存模型评估指标到数据库失败: {str(e)}')


if __name__ == '__main__':
    tam = TalentAttritionModel()
    # model_train() calls feature_engineering() internally, so the separate
    # feature_engineering() call the original made here only did the same
    # work twice (modulo the Redis cache); it has been removed.
    tam.model_train()
