#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LightGBM超参数调优专用脚本
LightGBM Hyperparameter Tuning Script

Author: ML Team
Date: 2025-11-16
Description: 专门用于LightGBM的超参数调优
"""

import os
import sys
import json
import time
import numpy as np
import pandas as pd
from datetime import datetime
from concurrent.futures import ProcessPoolExecutor

# Add the project root (parent of this script's directory) to sys.path so
# sibling packages in the project are importable when run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


class LightGBMHyperparameterTuner:
    """Dedicated hyperparameter tuner for LightGBM regression.

    Loads the engineered training features, keeps an 80/20 train/validation
    split plus the merged full set (used for cross-validation scoring), and
    offers three search strategies:

    * leaf-wise grid search — exhaustive over a leaf-growth-oriented grid
    * random search — uniform sampling from the full parameter grid
    * depth-first search — two stages: depth params first, then the rest

    Each search writes JSON/CSV artifacts under ``lightgbm_tuning/`` and
    updates the LightGBM model config file with the best parameters found.
    """

    def __init__(self):
        """Load the data and prepare the output directory.

        Raises:
            ValueError: if the training feature CSV could not be loaded.
        """
        self.X_train, self.X_val, self.y_train, self.y_val = self.load_data()
        if self.X_train is None:
            raise ValueError("数据加载失败，无法初始化调优器")

        # Merge train and validation rows; cross-validation re-splits them.
        self.X_full = pd.concat([self.X_train, self.X_val])
        self.y_full = pd.concat([self.y_train, self.y_val])

        # All tuning artifacts (JSON details, CSV summaries) go here.
        self.output_dir = os.path.join(os.path.dirname(__file__), "lightgbm_tuning")
        os.makedirs(self.output_dir, exist_ok=True)

        print("LightGBM超参数调优器初始化完成")

    def load_data(self):
        """Load training features and split them into train/validation sets.

        Returns:
            Tuple ``(X_train, X_val, y_train, y_val)``. The target is
            log1p-transformed ``SalePrice``, so all error metrics are on the
            log scale. All four entries are ``None`` when loading fails.
        """
        print("[INFO] 加载数据...")

        try:
            # Resolve the project root from this script's absolute location.
            script_file = os.path.abspath(__file__)
            model_dir = os.path.dirname(script_file)
            project_root = os.path.dirname(model_dir)

            # Engineered features produced by the feature pipeline.
            train_path = os.path.join(project_root, 'feature', 'train_feature.csv')

            if not os.path.exists(train_path):
                print(f"[ERROR] 训练数据文件不存在: {train_path}")
                return None, None, None, None

            train_data = pd.read_csv(train_path)

            # Drop target and identifier columns ('Id' may be absent, hence
            # errors='ignore'); log-transform the target.
            X = train_data.drop(['SalePrice', 'Id'], axis=1, errors='ignore')
            y = np.log1p(train_data['SalePrice'])

            # Simple split: 80% train, 20% validation.
            from sklearn.model_selection import train_test_split
            X_train, X_val, y_train, y_val = train_test_split(
                X, y, test_size=0.2, random_state=49
            )

            print(f"[INFO] 训练数据大小: {X_train.shape}")
            print(f"[INFO] 验证数据大小: {X_val.shape}")

            return X_train, X_val, y_train, y_val

        except Exception as e:
            print(f"[ERROR] 数据加载失败: {e}")
            return None, None, None, None

    def get_param_grid(self):
        """Return the full LightGBM parameter grid used by random search."""
        return {
            'learning_rate': [0.003, 0.005, 0.008, 0.01, 0.015, 0.02],
            'n_estimators': [500, 800, 1000, 1500, 2000],
            'max_depth': [-1, 6, 8, 10, 12],
            'num_leaves': [15, 31, 63, 127, 255],
            'min_child_samples': [5, 10, 20, 30, 50],
            'subsample': [0.6, 0.7, 0.8, 0.9, 1.0],
            'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1.0],
            'reg_lambda': [0, 0.1, 0.5, 1.0, 2.0],
            'reg_alpha': [0, 0.1, 0.5, 1.0, 2.0],
            'min_split_gain': [0, 0.1, 0.2, 0.3, 0.4]
        }

    def get_random_params(self, n_iter=100):
        """Sample ``n_iter`` random parameter combinations from the grid.

        numpy scalar types are converted to native Python types so the
        combinations serialize cleanly to JSON and pickle across processes.
        """
        param_grid = self.get_param_grid()
        params = []

        for _ in range(n_iter):
            param = {}
            for key, values in param_grid.items():
                param[key] = np.random.choice(values)
                # np.random.choice returns numpy scalars; convert to native.
                if isinstance(param[key], (np.integer, np.floating, np.bool_)):
                    param[key] = param[key].item()
                elif isinstance(param[key], np.str_):
                    param[key] = str(param[key])
            params.append(param)

        return params

    def evaluate_single_params(self, params):
        """Evaluate one parameter combination with 5-fold cross-validation.

        Returns:
            dict with mean/std of RMSE, MAE and R² and ``success=True``, or
            ``{'success': False, 'rmse_mean': inf, 'error': ...}`` on failure.
        """
        from lightgbm import LGBMRegressor
        from sklearn.model_selection import cross_validate

        try:
            # Ensure every hyperparameter is a native Python type before it
            # reaches LightGBM / JSON serialization.
            clean_params = {}
            for key, value in params.items():
                if isinstance(value, (np.integer, np.floating, np.bool_)):
                    clean_params[key] = value.item()
                else:
                    clean_params[key] = value

            # Build the LightGBM model under evaluation.
            model = LGBMRegressor(**clean_params, random_state=49, n_jobs=-1, verbose=-1)

            # One cross_validate call scores all three metrics on the same
            # deterministic KFold split, fitting each fold once — the previous
            # three cross_val_score calls tripled the training cost for
            # numerically identical results.
            cv_scores = cross_validate(
                model, self.X_full, self.y_full, cv=5,
                scoring=('neg_root_mean_squared_error',
                         'neg_mean_absolute_error',
                         'r2')
            )
            rmse_scores = -cv_scores['test_neg_root_mean_squared_error']
            mae_scores = -cv_scores['test_neg_mean_absolute_error']
            r2_scores = cv_scores['test_r2']

            return {
                'params': params,
                'rmse_mean': rmse_scores.mean(),
                'rmse_std': rmse_scores.std(),
                'mae_mean': mae_scores.mean(),
                'mae_std': mae_scores.std(),
                'r2_mean': r2_scores.mean(),
                'r2_std': r2_scores.std(),
                'success': True
            }

        except Exception as e:
            return {
                'params': params,
                'rmse_mean': float('inf'),
                'success': False,
                'error': str(e)
            }

    def leaf_wise_grid_search(self, n_workers=3):
        """Exhaustive grid search over a leaf-wise-growth-oriented grid."""
        print("开始LightGBM Leaf-wise策略网格搜索...")

        # Grid tailored to LightGBM's leaf-wise tree growth.
        leaf_wise_grid = {
            'learning_rate': [0.003, 0.005, 0.008, 0.01, 0.015],
            'n_estimators': [800, 1000, 1500, 2000],
            'max_depth': [-1, 8, 10, 12],  # -1 means unlimited depth
            'num_leaves': [31, 63, 127, 255],  # leaves per tree
            'min_child_samples': [10, 20, 30],
            'subsample': [0.7, 0.8, 0.9, 1.0],
            'colsample_bytree': [0.7, 0.8, 0.9, 1.0],
            'reg_lambda': [0, 0.1, 0.5, 1.0],
            'reg_alpha': [0, 0.1, 0.5],
            'min_split_gain': [0, 0.1, 0.2]
        }

        # Enumerate the Cartesian product of all grid values.
        param_combinations = []
        from itertools import product
        keys = list(leaf_wise_grid.keys())
        values = list(leaf_wise_grid.values())

        for combination in product(*values):
            param_dict = dict(zip(keys, combination))
            param_combinations.append(param_dict)

        print(f"生成 {len(param_combinations)} 个Leaf-wise参数组合")

        return self._parallel_evaluate(param_combinations, n_workers, "leaf_wise_grid_search")

    def random_search(self, n_iter=150, n_workers=3):
        """Randomly sample and evaluate ``n_iter`` parameter combinations."""
        print(f"开始LightGBM随机搜索，迭代次数: {n_iter}")

        param_combinations = self.get_random_params(n_iter)
        return self._parallel_evaluate(param_combinations, n_workers, "random_search")

    def depth_wise_search(self, n_workers=3):
        """Two-stage search: tune depth-related params first, then the rest.

        Falls back to random search when the first stage fails entirely.
        """
        print("开始LightGBM深度优先搜索...")

        # Stage 1: optimize max_depth / num_leaves / learning_rate.
        print("第一阶段：优化深度相关参数")
        depth_params = self._optimize_depth_params()

        # Stage 2: fix the best depth params and optimize the remainder.
        print("\n第二阶段：基于最佳深度参数优化其他参数")
        if depth_params:
            best_depth = depth_params['best_params']
            other_params = self._optimize_other_params(best_depth)

            # Merge both stages into a single ranked result set.
            if other_params:
                all_results = self._combine_search_results(depth_params, other_params)
                return self._analyze_combined_results(all_results, "depth_wise_search")
            else:
                return depth_params
        else:
            print("深度参数优化失败，使用随机搜索")
            return self.random_search(n_iter=100, n_workers=n_workers)

    def _optimize_depth_params(self):
        """Stage 1 of depth-wise search: evaluate depth-related combinations."""
        depth_combinations = []
        for max_depth in [-1, 8, 10, 12]:
            for num_leaves in [31, 63, 127, 255]:
                for learning_rate in [0.005, 0.008, 0.01]:
                    depth_combinations.append({
                        'max_depth': max_depth,
                        # num_leaves must not exceed 2**max_depth when depth
                        # is limited; -1 (unlimited) keeps num_leaves as-is.
                        'num_leaves': min(num_leaves, 2**max_depth if max_depth > 0 else num_leaves),
                        'learning_rate': learning_rate,
                        'n_estimators': 1000,
                        'min_child_samples': 20,
                        'subsample': 0.8,
                        'colsample_bytree': 0.8,
                        'reg_lambda': 0.1,
                        'reg_alpha': 0.1,
                        'min_split_gain': 0.1
                    })

        # Cap at 20 combinations to bound stage-1 runtime.
        return self._parallel_evaluate(depth_combinations[:20], 2, "depth_optimization")

    def _optimize_other_params(self, best_depth):
        """Stage 2 of depth-wise search: tune sampling/regularization params.

        Args:
            best_depth: the best parameter dict found by stage 1; its depth
                settings are kept fixed while the remaining params vary.
        """
        other_combinations = []
        base_params = best_depth.copy()

        # Vary sampling and regularization around the fixed depth params.
        for subsample in [0.7, 0.8, 0.9, 1.0]:
            for colsample_bytree in [0.7, 0.8, 0.9, 1.0]:
                for reg_lambda in [0, 0.1, 0.5, 1.0]:
                    for reg_alpha in [0, 0.1, 0.5]:
                        for min_child_samples in [10, 20, 30]:
                            params = base_params.copy()
                            params.update({
                                'subsample': subsample,
                                'colsample_bytree': colsample_bytree,
                                'reg_lambda': reg_lambda,
                                'reg_alpha': reg_alpha,
                                'min_child_samples': min_child_samples
                            })
                            other_combinations.append(params)

        # Cap at 40 combinations to bound stage-2 runtime.
        return self._parallel_evaluate(other_combinations[:40], 3, "other_optimization")

    def _parallel_evaluate(self, param_combinations, n_workers, search_type):
        """Evaluate parameter combinations in parallel batches.

        NOTE(review): submitting the bound method pickles this tuner (and its
        full dataset) to every worker task — functional, but memory-heavy.

        Returns the analysis dict from :meth:`_analyze_results`.
        """
        all_results = []
        start_time = time.time()

        # Process in batches so progress is visible and workers recycle.
        batch_size = 40
        total_batches = (len(param_combinations) + batch_size - 1) // batch_size

        for i in range(0, len(param_combinations), batch_size):
            batch = param_combinations[i:i + batch_size]
            batch_num = i // batch_size + 1

            print(f"处理批次 {batch_num}/{total_batches} ({len(batch)} 个参数组合)")

            # Fan the batch out to worker processes.
            with ProcessPoolExecutor(max_workers=n_workers) as executor:
                futures = [executor.submit(self.evaluate_single_params, params) for params in batch]

                for j, future in enumerate(futures):
                    try:
                        result = future.result()
                        all_results.append(result)

                        # Report the best RMSE seen so far every 15 results.
                        if (j + 1) % 15 == 0:
                            successful_results = [r for r in all_results if r.get('success', False)]
                            if successful_results:
                                best_rmse = min(r['rmse_mean'] for r in successful_results)
                                print(f"  批次进度: {j+1}/{len(batch)}, 当前最佳RMSE: {best_rmse:.6f}")

                    except Exception as e:
                        print(f"  批次中第{j+1}个参数评估失败: {e}")

        total_time = time.time() - start_time
        successful_results = [r for r in all_results if r.get('success', False)]

        print(f"{search_type}完成，耗时: {total_time:.2f}秒")
        print(f"成功评估: {len(successful_results)}/{len(all_results)}")

        return self._analyze_results(all_results, search_type)

    def _analyze_results(self, results, search_type):
        """Rank successful results, save artifacts and print a summary.

        Returns the analysis dict, or ``None`` when every evaluation failed.
        """
        successful_results = [r for r in results if r.get('success', False)]

        if not successful_results:
            print("没有成功的参数评估结果!")
            return None

        # Rank by cross-validated RMSE (lower is better).
        sorted_results = sorted(successful_results, key=lambda x: x['rmse_mean'])
        best_result = sorted_results[0]

        # Distribution statistics over all successful RMSE scores.
        rmse_scores = [r['rmse_mean'] for r in successful_results]

        analysis = {
            'model_type': 'lightgbm',
            'search_type': search_type,
            'total_evaluations': len(results),
            'successful_evaluations': len(successful_results),
            'best_params': best_result['params'],
            'best_rmse': best_result['rmse_mean'],
            'best_rmse_std': best_result['rmse_std'],
            'best_mae': best_result['mae_mean'],
            'best_r2': best_result['r2_mean'],
            'rmse_statistics': {
                'mean': np.mean(rmse_scores),
                'std': np.std(rmse_scores),
                'min': np.min(rmse_scores),
                'max': np.max(rmse_scores)
            },
            'top_10_results': sorted_results[:10]
        }

        # Persist JSON/CSV artifacts and update the model config.
        self._save_results(analysis, sorted_results)

        # Human-readable summary to stdout.
        self._print_results(analysis)

        return analysis

    def _combine_search_results(self, depth_results, other_results):
        """Merge stage-1 and stage-2 top-10 lists, dedupe, keep best 20."""
        combined_results = []

        # Collect stage-1 (depth) top results.
        if 'top_10_results' in depth_results:
            combined_results.extend(depth_results['top_10_results'])

        # Collect stage-2 (other params) top results.
        if 'top_10_results' in other_results:
            combined_results.extend(other_results['top_10_results'])

        # Rank by RMSE and drop duplicate parameter combinations.
        combined_results = sorted(combined_results, key=lambda x: x['rmse_mean'])
        unique_results = []
        seen_params = set()

        for result in combined_results:
            # Canonical signature of a param dict, used for deduplication.
            param_signature = tuple(sorted(result['params'].items()))
            if param_signature not in seen_params:
                seen_params.add(param_signature)
                unique_results.append(result)

        return unique_results[:20]  # keep the 20 best unique results

    def _analyze_combined_results(self, all_results, search_type):
        """Analyze an already-successful merged result list (see above).

        Unlike :meth:`_analyze_results`, the input contains only successful
        evaluations, so no success-filtering is applied here.
        """
        if not all_results:
            print("没有可分析的合并结果!")
            return None

        # Rank by cross-validated RMSE (lower is better).
        sorted_results = sorted(all_results, key=lambda x: x['rmse_mean'])
        best_result = sorted_results[0]

        # Distribution statistics over the merged RMSE scores.
        rmse_scores = [r['rmse_mean'] for r in all_results]

        analysis = {
            'model_type': 'lightgbm',
            'search_type': search_type,
            'total_evaluations': len(all_results),
            'successful_evaluations': len(all_results),
            'best_params': best_result['params'],
            'best_rmse': best_result['rmse_mean'],
            'best_rmse_std': best_result['rmse_std'],
            'best_mae': best_result['mae_mean'],
            'best_r2': best_result['r2_mean'],
            'rmse_statistics': {
                'mean': np.mean(rmse_scores),
                'std': np.std(rmse_scores),
                'min': np.min(rmse_scores),
                'max': np.max(rmse_scores)
            },
            'top_10_results': sorted_results[:10]
        }

        # Persist JSON/CSV artifacts and update the model config.
        self._save_results(analysis, sorted_results)

        # Human-readable summary to stdout.
        self._print_results(analysis)

        return analysis

    def _save_results(self, analysis, sorted_results):
        """Write timestamped JSON + CSV artifacts and update the config."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Full JSON dump: analysis summary plus every successful result.
        results_file = os.path.join(self.output_dir, f"lightgbm_{analysis['search_type']}_results_{timestamp}.json")
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump({
                'analysis': analysis,
                'all_successful_results': sorted_results
            }, f, indent=2, ensure_ascii=False)

        # CSV summary of the top-20 results, one row per combination.
        csv_data = []
        for i, result in enumerate(sorted_results[:20]):
            row = {
                'rank': i + 1,
                'rmse_mean': result['rmse_mean'],
                'rmse_std': result['rmse_std'],
                'mae_mean': result['mae_mean'],
                'r2_mean': result['r2_mean']
            }
            row.update(result['params'])
            csv_data.append(row)

        csv_df = pd.DataFrame(csv_data)
        csv_file = os.path.join(self.output_dir, f"lightgbm_{analysis['search_type']}_summary_{timestamp}.csv")
        # utf-8-sig BOM so Excel opens the CJK headers correctly.
        csv_df.to_csv(csv_file, index=False, encoding='utf-8-sig')

        print(f"结果已保存:")
        print(f"  详细结果: {results_file}")
        print(f"  摘要结果: {csv_file}")

        # Propagate the best parameters into the model config file.
        self._update_model_config(analysis['best_params'])

    def _update_model_config(self, best_params):
        """Back up and update ``lightgbm/model_config.json`` with best params.

        Silently does nothing when the config file does not exist.
        """
        config_path = os.path.join(os.path.dirname(__file__), "lightgbm", "model_config.json")

        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)

            # Back up the original config before overwriting parameters.
            backup_path = config_path.replace('.json', f'_backup_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json')
            with open(backup_path, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=2, ensure_ascii=False)
            print(f"原始配置已备份到: {backup_path}")

            # setdefault guards against configs missing the 'model_params'
            # section (the previous direct indexing raised KeyError).
            config.setdefault('model_params', {}).update(best_params)

            # Write the updated config back in place.
            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=2, ensure_ascii=False)

            print(f"模型配置已更新: {config_path}")

    def _print_results(self, analysis):
        """Print a formatted summary of the tuning analysis to stdout."""
        print("\n" + "="*60)
        print("LightGBM超参数调优结果")
        print("="*60)
        print(f"搜索类型: {analysis['search_type']}")
        print(f"总评估次数: {analysis['total_evaluations']}")
        print(f"成功评估次数: {analysis['successful_evaluations']}")
        print(f"成功率: {analysis['successful_evaluations']/analysis['total_evaluations']*100:.1f}%")
        print()
        print("最佳结果:")
        print(f"  RMSE: {analysis['best_rmse']:.6f} ± {analysis['best_rmse_std']:.6f}")
        print(f"  MAE:  {analysis['best_mae']:.6f}")
        print(f"  R²:   {analysis['best_r2']:.4f}")
        print()
        print("最佳参数:")
        for key, value in analysis['best_params'].items():
            print(f"  {key}: {value}")
        print()
        print("RMSE统计:")
        print(f"  平均值: {analysis['rmse_statistics']['mean']:.6f}")
        print(f"  标准差: {analysis['rmse_statistics']['std']:.6f}")
        print(f"  最小值: {analysis['rmse_statistics']['min']:.6f}")
        print(f"  最大值: {analysis['rmse_statistics']['max']:.6f}")
        print("="*60)

def main():
    """Interactive entry point: build the tuner and run the chosen search.

    Prompts the user for one of three strategies; on invalid input or any
    unexpected error, falls back to a 150-iteration random search. A
    keyboard interrupt aborts the tuning cleanly.
    """
    print("LightGBM超参数调优")
    print("-" * 40)

    tuner = LightGBMHyperparameterTuner()

    # Present the available search strategies.
    print("选择搜索策略:")
    print("1. Leaf-wise网格搜索 (针对叶子生长)")
    print("2. 随机搜索 (快速全面)")
    print("3. 深度优先搜索 (先优化深度)")

    try:
        selection = input("请选择 (1, 2 或 3): ").strip()
        if selection == "1":
            print("\n开始Leaf-wise网格搜索...")
            tuner.leaf_wise_grid_search(n_workers=3)
        elif selection == "2":
            # Non-numeric or empty input falls back to the default of 150.
            raw_iters = input("请输入迭代次数 (默认150): ").strip()
            iterations = int(raw_iters) if raw_iters.isdigit() else 150
            print(f"\n开始随机搜索，迭代次数: {iterations}")
            tuner.random_search(n_iter=iterations, n_workers=3)
        elif selection == "3":
            print("\n开始深度优先搜索...")
            tuner.depth_wise_search(n_workers=3)
        else:
            print("无效选择，使用默认随机搜索")
            tuner.random_search(n_iter=150, n_workers=3)
    except KeyboardInterrupt:
        print("\n调优被用户中断")
    except Exception as e:
        # Any other failure degrades gracefully to the default search.
        print(f"\n调优过程中出现错误: {e}")
        print("使用默认随机搜索...")
        tuner.random_search(n_iter=150, n_workers=3)


if __name__ == "__main__":
    main()