# src/distributed_grid_search.py
"""
分布式网格搜索模块
"""
import os
import json
import numpy as np
import pandas as pd
from sklearn.model_selection import ParameterGrid, ParameterSampler
from sklearn.metrics import f1_score
import lightgbm as lgb
import joblib
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
import multiprocessing
from utils.log import Logger
import datetime
import random


class DistributedGridSearch:
    """Exhaustive grid search that evaluates parameter combinations in parallel.

    Each combination trains a LightGBM classifier on a fixed train split and
    is scored by F1 on a fixed validation split. Trials run in a thread pool:
    LightGBM releases the GIL during training, and threads avoid pickling the
    training data once per task.
    """

    def __init__(self, X_train, y_train, X_val, y_val, n_jobs=-1, logger=None):
        self.X_train = X_train
        self.y_train = y_train
        self.X_val = X_val
        self.y_val = y_val
        # n_jobs <= 0 means "use every available CPU core".
        self.n_jobs = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
        self.logger = logger or Logger(root_path="../", log_name="distributed_grid_search").get_logger()

    def evaluate_params(self, params):
        """Train one model with ``params`` and return its validation F1.

        Returns a dict with keys ``params``, ``f1_score`` and ``model``.
        On failure the dict additionally carries an ``error`` key,
        ``f1_score`` is 0.0 and ``model`` is None; the caller uses the
        ``error`` key to distinguish failures from genuine 0.0 scores.
        """
        try:
            model = lgb.LGBMClassifier(**params)

            # Early stopping on the validation split keeps each trial cheap.
            model.fit(
                self.X_train, self.y_train,
                eval_set=[(self.X_val, self.y_val)],
                callbacks=[lgb.early_stopping(20, verbose=False)]
            )

            y_pred = model.predict(self.X_val)
            f1 = f1_score(self.y_val, y_pred)

            self.logger.info(f"参数组合评估完成: F1={f1:.4f}, 参数={params}")

            return {
                'params': params,
                'f1_score': f1,
                'model': model
            }
        except Exception as e:
            self.logger.error(f"参数评估失败: {str(e)}, 参数={params}")
            return {
                'params': params,
                'f1_score': 0.0,
                'model': None,
                'error': str(e)
            }

    def search(self, param_grid, scoring='f1'):
        """Run the grid search; return the best result dict, or None if every
        combination failed.

        ``scoring`` is accepted for interface compatibility but only F1 is
        currently computed.
        """
        # Materialize the grid once instead of re-enumerating it for logging.
        param_combinations = list(ParameterGrid(param_grid))

        self.logger.info("开始分布式网格搜索")
        self.logger.info(f"参数组合总数: {len(param_combinations)}")
        self.logger.info(f"并行工作进程数: {self.n_jobs}")

        results = []
        with ThreadPoolExecutor(max_workers=self.n_jobs) as executor:
            future_to_params = {
                executor.submit(self.evaluate_params, params): params
                for params in param_combinations
            }

            for future in as_completed(future_to_params):
                try:
                    results.append(future.result())
                except Exception as e:
                    self.logger.error(f"任务执行异常: {str(e)}")

        # Bug fix: filter on the explicit 'error' marker rather than
        # f1_score > 0, so a legitimate trial that happens to score exactly
        # 0.0 is not misclassified as a failure.
        valid_results = [r for r in results if 'error' not in r]
        if not valid_results:
            self.logger.error("所有参数组合评估失败")
            return None

        best_result = max(valid_results, key=lambda x: x['f1_score'])

        self.logger.info(f"网格搜索完成")
        self.logger.info(f"最佳F1分数: {best_result['f1_score']:.4f}")
        self.logger.info(f"最佳参数: {best_result['params']}")

        return best_result


class DistributedCrossValidationGridSearch:
    """Grid search scored by stratified k-fold cross-validation, with the
    parameter combinations evaluated in parallel threads.

    Each combination trains one LightGBM model per fold and is ranked by its
    mean F1 across folds. ``X``/``y`` are indexed with ``.iloc`` inside the
    folds, so they are assumed to be pandas objects — TODO confirm at call
    sites.
    """

    def __init__(self, X, y, cv_folds=5, n_jobs=-1, logger=None):
        self.X = X
        self.y = y
        self.cv_folds = cv_folds
        # n_jobs <= 0 means "use every available CPU core".
        self.n_jobs = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
        self.logger = logger or Logger(root_path="../", log_name="distributed_cv_grid_search").get_logger()

    def cross_validate_params(self, params):
        """Cross-validate one parameter combination.

        Returns a dict with ``params``, ``mean_f1_score``, ``std_f1_score``,
        the per-fold ``f1_scores`` and the per-fold ``models``. On failure the
        dict additionally carries an ``error`` key and the score fields are
        zeroed/empty; the caller uses ``error`` to tell failures apart from
        genuine 0.0 scores.
        """
        try:
            # Imported locally so the module loads even where this class is
            # never used; the import is cached after the first call.
            from sklearn.model_selection import StratifiedKFold

            # Fixed random_state keeps the fold assignment identical across
            # parameter combinations, so scores are comparable.
            skf = StratifiedKFold(n_splits=self.cv_folds, shuffle=True, random_state=22)

            f1_scores = []
            models = []

            for fold, (train_idx, val_idx) in enumerate(skf.split(self.X, self.y)):
                X_train_fold, X_val_fold = self.X.iloc[train_idx], self.X.iloc[val_idx]
                y_train_fold, y_val_fold = self.y.iloc[train_idx], self.y.iloc[val_idx]

                # Early stopping on the fold's validation split keeps each
                # fold cheap.
                model = lgb.LGBMClassifier(**params)
                model.fit(
                    X_train_fold, y_train_fold,
                    eval_set=[(X_val_fold, y_val_fold)],
                    callbacks=[lgb.early_stopping(20, verbose=False)]
                )

                y_pred = model.predict(X_val_fold)
                f1 = f1_score(y_val_fold, y_pred)
                f1_scores.append(f1)
                models.append(model)

                self.logger.debug(f"Fold {fold + 1}/{self.cv_folds}, F1={f1:.4f}")

            avg_f1 = np.mean(f1_scores)
            std_f1 = np.std(f1_scores)

            self.logger.info(f"交叉验证完成: 平均F1={avg_f1:.4f} (±{std_f1:.4f}), 参数={params}")

            return {
                'params': params,
                'mean_f1_score': avg_f1,
                'std_f1_score': std_f1,
                'f1_scores': f1_scores,
                'models': models
            }
        except Exception as e:
            self.logger.error(f"交叉验证失败: {str(e)}, 参数={params}")
            return {
                'params': params,
                'mean_f1_score': 0.0,
                'std_f1_score': 0.0,
                'f1_scores': [],
                'models': [],
                'error': str(e)
            }

    def search(self, param_grid):
        """Run the cross-validated grid search; return the best result dict,
        or None if every combination failed.
        """
        # Materialize the grid once instead of re-enumerating it for logging.
        param_combinations = list(ParameterGrid(param_grid))

        self.logger.info("开始分布式交叉验证网格搜索")
        self.logger.info(f"交叉验证折数: {self.cv_folds}")
        self.logger.info(f"参数组合总数: {len(param_combinations)}")
        self.logger.info(f"并行工作进程数: {self.n_jobs}")

        results = []
        with ThreadPoolExecutor(max_workers=self.n_jobs) as executor:
            future_to_params = {
                executor.submit(self.cross_validate_params, params): params
                for params in param_combinations
            }

            for future in as_completed(future_to_params):
                try:
                    results.append(future.result())
                except Exception as e:
                    self.logger.error(f"任务执行异常: {str(e)}")

        # Bug fix: filter on the explicit 'error' marker rather than
        # mean_f1_score > 0, so a legitimate combination scoring exactly 0.0
        # is not misclassified as a failure.
        valid_results = [r for r in results if 'error' not in r]
        if not valid_results:
            self.logger.error("所有参数组合评估失败")
            return None

        best_result = max(valid_results, key=lambda x: x['mean_f1_score'])

        self.logger.info(f"交叉验证网格搜索完成")
        self.logger.info(f"最佳平均F1分数: {best_result['mean_f1_score']:.4f} (±{best_result['std_f1_score']:.4f})")
        self.logger.info(f"最佳参数: {best_result['params']}")

        return best_result


class BayesianGridSearch:
    """Randomized hyperparameter search over sampled parameter combinations.

    NOTE(review): despite the name, this is plain random search — it draws
    ``n_iter`` combinations with ``sklearn.model_selection.ParameterSampler``
    and evaluates them independently; no surrogate model or acquisition
    function guides the sampling. The class name is kept for backward
    compatibility with existing callers.
    """

    def __init__(self, X, y, X_val, y_val, n_jobs=-1, logger=None, n_iter=50, random_state=None):
        self.X = X
        self.y = y
        self.X_val = X_val
        self.y_val = y_val
        # n_jobs <= 0 means "use every available CPU core".
        self.n_jobs = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
        self.logger = logger or Logger(root_path="../", log_name="bayesian_grid_search").get_logger()
        self.n_iter = n_iter          # number of parameter combinations to sample
        self.random_state = random_state  # seed for reproducible sampling

    def evaluate_params(self, params):
        """Train one model with ``params`` and return its validation F1.

        Returns a dict with keys ``params``, ``f1_score`` and ``model``.
        On failure the dict additionally carries an ``error`` key,
        ``f1_score`` is 0.0 and ``model`` is None; the caller uses the
        ``error`` key to distinguish failures from genuine 0.0 scores.
        """
        try:
            model = lgb.LGBMClassifier(**params)

            # Early stopping on the validation split keeps each trial cheap.
            model.fit(
                self.X, self.y,
                eval_set=[(self.X_val, self.y_val)],
                callbacks=[lgb.early_stopping(20, verbose=False)]
            )

            y_pred = model.predict(self.X_val)
            f1 = f1_score(self.y_val, y_pred)

            self.logger.info(f"贝叶斯参数评估完成: F1={f1:.4f}, 参数={params}")

            return {
                'params': params,
                'f1_score': f1,
                'model': model
            }
        except Exception as e:
            self.logger.error(f"贝叶斯参数评估失败: {str(e)}, 参数={params}")
            return {
                'params': params,
                'f1_score': 0.0,
                'model': None,
                'error': str(e)
            }

    def search(self, param_distributions):
        """Sample ``n_iter`` combinations, evaluate them in parallel and
        return the best result dict, or None if every trial failed.
        """
        self.logger.info("开始贝叶斯优化网格搜索")
        self.logger.info(f"参数采样次数: {self.n_iter}")
        self.logger.info(f"并行工作进程数: {self.n_jobs}")

        # ParameterSampler deduplicates when the space is exhaustible, so
        # fewer than n_iter combinations may come back.
        param_sampler = ParameterSampler(
            param_distributions,
            n_iter=self.n_iter,
            random_state=self.random_state
        )
        param_list = list(param_sampler)

        self.logger.info(f"实际采样参数组合数: {len(param_list)}")

        results = []
        with ThreadPoolExecutor(max_workers=self.n_jobs) as executor:
            future_to_params = {
                executor.submit(self.evaluate_params, params): params
                for params in param_list
            }

            for future in as_completed(future_to_params):
                try:
                    results.append(future.result())
                except Exception as e:
                    self.logger.error(f"任务执行异常: {str(e)}")

        # Bug fix: filter on the explicit 'error' marker rather than
        # f1_score > 0, so a legitimate trial that happens to score exactly
        # 0.0 is not misclassified as a failure.
        valid_results = [r for r in results if 'error' not in r]
        if not valid_results:
            self.logger.error("所有参数组合评估失败")
            return None

        best_result = max(valid_results, key=lambda x: x['f1_score'])

        self.logger.info(f"贝叶斯优化网格搜索完成")
        self.logger.info(f"最佳F1分数: {best_result['f1_score']:.4f}")
        self.logger.info(f"最佳参数: {best_result['params']}")

        return best_result


class DistributedGridSearchOptimizer:
    """Facade that picks one of the search strategies ('grid', 'cv' or
    'bayesian') and runs it with a uniform interface.
    """

    def __init__(self, search_method='grid', n_jobs=-1, logger=None):
        self.search_method = search_method
        # Non-positive n_jobs falls back to one worker per CPU core.
        self.n_jobs = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
        self.logger = logger or Logger(root_path="../", log_name="distributed_optimizer").get_logger()

    def optimize(self, X, y, X_val, y_val, param_grid, **kwargs):
        """Build the searcher matching ``search_method``, run it over
        ``param_grid`` and return its best-result dict (or None).

        Recognized ``kwargs``: ``n_iter`` and ``random_state`` (bayesian),
        ``cv_folds`` (cv). Anything else is ignored.
        """
        self.logger.info(f"使用 {self.search_method} 方法进行超参数优化")

        if self.search_method == 'bayesian':
            searcher = BayesianGridSearch(
                X=X,
                y=y,
                X_val=X_val,
                y_val=y_val,
                n_jobs=self.n_jobs,
                logger=self.logger,
                n_iter=kwargs.get('n_iter', 30),
                random_state=kwargs.get('random_state', None),
            )
        elif self.search_method == 'cv':
            searcher = DistributedCrossValidationGridSearch(
                X=X,
                y=y,
                cv_folds=kwargs.get('cv_folds', 3),
                n_jobs=self.n_jobs,
                logger=self.logger,
            )
        else:
            # Any unrecognized method name falls back to plain grid search.
            searcher = DistributedGridSearch(
                X_train=X,
                y_train=y,
                X_val=X_val,
                y_val=y_val,
                n_jobs=self.n_jobs,
                logger=self.logger,
            )
        return searcher.search(param_grid)
