#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
超参数调优主脚本 - 支持异步并行执行
Hyperparameter Tuning Main Script with Async Parallel Execution

Author: ML Team
Date: 2025-11-16
Description: 统一的超参数调优工具，支持Grid Search和Random Search
"""

import os
import sys
import json
import time
import asyncio
import logging
import numpy as np
import pandas as pd
from datetime import datetime
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import Dict, List, Tuple, Any, Optional

# Add the project root directory to sys.path so that sibling modules
# (e.g. validation_report_utils) can be imported when run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import sys  # NOTE(review): redundant — sys is already imported above
import os  # NOTE(review): redundant — os is already imported above
from validation_report_utils import generate_validation_outputs

# Configure logging: write to both a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('hyperparameter_tuning.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class HyperparameterTuner:
    """Hyperparameter tuner supporting multiple search strategies and parallel execution."""

    # Metric name -> (sklearn scoring string, sign that converts the sklearn
    # score convention back to a positive error / plain R2).
    _CV_METRICS = {
        'rmse': ('neg_root_mean_squared_error', -1.0),
        'mae': ('neg_mean_absolute_error', -1.0),
        'r2': ('r2', 1.0),
    }

    def __init__(self, model_type: str, search_strategy: str = "grid",
                 max_workers: int = None, cv_folds: int = 5):
        """
        Initialize the tuner and load the training data.

        Args:
            model_type: Model type ('lasso', 'xgboost', 'lightgbm')
            search_strategy: Search strategy ('grid', 'random', 'bayesian')
            max_workers: Maximum number of parallel worker processes
            cv_folds: Number of cross-validation folds

        Raises:
            ValueError: If the training data could not be loaded.
        """
        self.model_type = model_type
        self.search_strategy = search_strategy
        self.max_workers = max_workers or min(os.cpu_count(), 4)
        self.cv_folds = cv_folds

        # load_data() signals failure by returning a tuple of four Nones.
        self.X_train, self.X_val, self.y_train, self.y_val = self.load_data()
        if self.X_train is None:
            # Fail fast with a clear message instead of crashing later inside
            # pd.concat() with None inputs.
            raise ValueError(f"训练数据加载失败，无法初始化{model_type}调优器")

        # Recombine train + validation so cross-validation sees the full set.
        self.X_train_full = pd.concat([self.X_train, self.X_val])
        self.y_train_full = pd.concat([self.y_train, self.y_val])

        # Per-model output directory next to this script.
        self.output_dir = os.path.join(os.path.dirname(__file__), f"{model_type}_tuning")
        os.makedirs(self.output_dir, exist_ok=True)

        logger.info(f"初始化{model_type}模型调优器，搜索策略: {search_strategy}")

    def load_data(self):
        """
        Load the training CSV and split it into train/validation sets.

        Returns:
            (X_train, X_val, y_train, y_val), or four Nones when the file is
            missing or unreadable.
        """
        logger.info("加载数据...")

        try:
            # Project root is the parent of the directory containing this script.
            script_file = os.path.abspath(__file__)
            model_dir = os.path.dirname(script_file)
            project_root = os.path.dirname(model_dir)

            train_path = os.path.join(project_root, 'feature', 'train_feature.csv')

            if not os.path.exists(train_path):
                logger.error(f"训练数据文件不存在: {train_path}")
                return None, None, None, None

            train_data = pd.read_csv(train_path)

            # Drop identifier/target columns from X; the target is
            # log-transformed (log1p) so RMSE is computed in log space.
            X = train_data.drop(['SalePrice', 'Id'], axis=1, errors='ignore')
            y = np.log1p(train_data['SalePrice'])

            # Simple 80/20 split with a fixed seed for reproducibility.
            from sklearn.model_selection import train_test_split
            X_train, X_val, y_train, y_val = train_test_split(
                X, y, test_size=0.2, random_state=49
            )

            logger.info(f"训练数据大小: {X_train.shape}")
            logger.info(f"验证数据大小: {X_val.shape}")

            return X_train, X_val, y_train, y_val

        except Exception as e:
            logger.error(f"数据加载失败: {e}")
            return None, None, None, None

    def get_param_space(self) -> Dict[str, Any]:
        """Return the parameter search space for the configured model type (empty dict if unknown)."""
        param_spaces = {
            'lasso': {
                'alpha': [0.0001, 0.0003, 0.0005, 0.001, 0.003, 0.005, 0.01, 0.03, 0.05, 0.1],
                'max_iter': [500, 1000, 2000],
                'tol': [1e-5, 1e-4, 1e-3],
                'selection': ['cyclic', 'random'],
                'precompute': [True, False]
            },
            'xgboost': {
                'learning_rate': [0.003, 0.005, 0.008, 0.01, 0.015, 0.02],
                'n_estimators': [500, 800, 1000, 1500, 2000],
                'max_depth': [3, 4, 5, 6, 7, 8],
                'min_child_weight': [1, 2, 3, 4, 5],
                'subsample': [0.6, 0.7, 0.8, 0.9, 1.0],
                'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1.0],
                'colsample_bylevel': [0.6, 0.7, 0.8, 0.9, 1.0],
                'gamma': [0, 0.1, 0.2, 0.3, 0.4, 0.5],
                'reg_lambda': [0.5, 1.0, 1.5, 2.0, 3.0],
                'reg_alpha': [0, 0.01, 0.05, 0.1, 0.5]
            },
            'lightgbm': {
                'learning_rate': [0.003, 0.005, 0.008, 0.01, 0.015, 0.02],
                'n_estimators': [500, 800, 1000, 1500, 2000],
                'max_depth': [-1, 6, 8, 10, 12],
                'num_leaves': [15, 31, 63, 127, 255],
                'min_child_samples': [5, 10, 20, 30, 50],
                'subsample': [0.6, 0.7, 0.8, 0.9, 1.0],
                'colsample_bytree': [0.6, 0.7, 0.8, 0.9, 1.0],
                'reg_lambda': [0, 0.1, 0.5, 1.0, 2.0],
                'reg_alpha': [0, 0.1, 0.5, 1.0, 2.0],
                'min_split_gain': [0, 0.1, 0.2, 0.3, 0.4]
            }
        }
        return param_spaces.get(self.model_type, {})

    def generate_param_combinations(self) -> List[Dict[str, Any]]:
        """
        Generate parameter combinations according to the search strategy.

        Raises:
            ValueError: If the strategy is not 'grid' or 'random'
                (note: 'bayesian' is advertised in __init__ but not implemented).
        """
        param_space = self.get_param_space()

        if self.search_strategy == "grid":
            return self._grid_search_params(param_space)
        elif self.search_strategy == "random":
            return self._random_search_params(param_space, n_iter=50)
        else:
            raise ValueError(f"不支持的搜索策略: {self.search_strategy}")

    def _grid_search_params(self, param_space: Dict[str, List]) -> List[Dict[str, Any]]:
        """Return the full Cartesian product of the parameter space as a list of dicts."""
        from itertools import product

        keys = list(param_space.keys())
        values = list(param_space.values())

        combinations = [dict(zip(keys, combination)) for combination in product(*values)]

        logger.info(f"网格搜索生成 {len(combinations)} 个参数组合")
        return combinations

    def _random_search_params(self, param_space: Dict[str, List], n_iter: int = 50) -> List[Dict[str, Any]]:
        """Return n_iter parameter dicts sampled uniformly (with replacement) from the space."""
        import random

        combinations = [
            {key: random.choice(values) for key, values in param_space.items()}
            for _ in range(n_iter)
        ]

        logger.info(f"随机搜索生成 {len(combinations)} 个参数组合")
        return combinations

    def evaluate_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Evaluate a single parameter combination (runs in a worker process).

        Returns:
            A dict with 'params' plus nested per-metric statistics
            ({'mean', 'std', 'scores'} for 'rmse', 'mae', 'r2'). On failure
            the same nested shape is returned with sentinel (worst) values.
        """
        try:
            if self.model_type == 'lasso':
                return self._evaluate_lasso_params(params)
            elif self.model_type == 'xgboost':
                return self._evaluate_xgboost_params(params)
            elif self.model_type == 'lightgbm':
                return self._evaluate_lightgbm_params(params)
            else:
                raise ValueError(f"不支持的模型类型: {self.model_type}")
        except Exception as e:
            logger.error(f"参数评估失败: {params}, 错误: {e}")
            # BUGFIX: the old fallback returned flat scalars ({'rmse': inf}),
            # but every consumer reads result['rmse']['mean'] — a single failed
            # evaluation then crashed the whole analysis with a TypeError.
            return self._failed_result(params)

    @staticmethod
    def _failed_result(params: Dict[str, Any]) -> Dict[str, Any]:
        """Build a sentinel result (worst possible scores, nested shape) for a failed evaluation."""
        def sentinel(value: float) -> Dict[str, Any]:
            return {'mean': value, 'std': 0.0, 'scores': []}

        return {
            'params': params,
            'rmse': sentinel(float('inf')),
            'mae': sentinel(float('inf')),
            'r2': sentinel(-float('inf')),
        }

    def _cross_validate(self, estimator, params: Dict[str, Any]) -> Dict[str, Any]:
        """Cross-validate *estimator* on the full training set and collect RMSE/MAE/R2 statistics."""
        from sklearn.model_selection import cross_val_score

        result: Dict[str, Any] = {'params': params}
        for metric, (scoring, sign) in self._CV_METRICS.items():
            scores = sign * cross_val_score(
                estimator, self.X_train_full, self.y_train_full,
                cv=self.cv_folds, scoring=scoring
            )
            # Cast to plain floats: numpy scalars are not JSON-serializable
            # and would make json.dump in _save_results fail.
            result[metric] = {
                'mean': float(scores.mean()),
                'std': float(scores.std()),
                'scores': scores.tolist()
            }
        return result

    def _evaluate_lasso_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Evaluate Lasso parameters (standardization + Lasso in one pipeline)."""
        from sklearn.linear_model import Lasso
        from sklearn.preprocessing import StandardScaler
        from sklearn.pipeline import Pipeline

        pipeline = Pipeline([
            ('scaler', StandardScaler()),
            ('lasso', Lasso(**params, random_state=49))
        ])
        return self._cross_validate(pipeline, params)

    def _evaluate_xgboost_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Evaluate XGBoost parameters."""
        from xgboost import XGBRegressor

        model = XGBRegressor(**params, random_state=49, n_jobs=-1)
        return self._cross_validate(model, params)

    def _evaluate_lightgbm_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Evaluate LightGBM parameters."""
        from lightgbm import LGBMRegressor

        model = LGBMRegressor(**params, random_state=49, n_jobs=-1, verbose=-1)
        return self._cross_validate(model, params)

    async def tune_parameters_async(self) -> Dict[str, Any]:
        """
        Tune parameters asynchronously, fanning evaluations out to a process
        pool batch by batch.

        Returns:
            The analysis dict produced by _analyze_results().
        """
        logger.info(f"开始{self.model_type}模型异步参数调优...")

        param_combinations = self.generate_param_combinations()
        logger.info(f"共生成 {len(param_combinations)} 个参数组合")

        # Process in batches to bound the number of in-flight futures
        # (and hence memory held by pending results).
        batch_size = 20
        all_results = []

        start_time = time.time()

        for i in range(0, len(param_combinations), batch_size):
            batch = param_combinations[i:i + batch_size]
            logger.info(f"处理批次 {i//batch_size + 1}/{(len(param_combinations) + batch_size - 1)//batch_size}")

            # Run the current batch in parallel worker processes.
            with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
                futures = [executor.submit(self.evaluate_params, params) for params in batch]

                # BUGFIX: wrap_future's `loop=` keyword argument was removed in
                # Python 3.10; with no argument it binds to the running loop.
                wrapped = [asyncio.wrap_future(future) for future in futures]

                for completed in asyncio.as_completed(wrapped):
                    try:
                        result = await completed
                        all_results.append(result)

                        # Periodically report the best score seen so far.
                        if len(all_results) % 5 == 0:
                            best_result = min(all_results, key=lambda x: x['rmse']['mean'])
                            logger.info(f"当前最佳RMSE: {best_result['rmse']['mean']:.6f}")

                    except Exception as e:
                        logger.error(f"参数评估异步任务失败: {e}")

        total_time = time.time() - start_time
        logger.info(f"参数调优完成，耗时: {total_time:.2f}秒")

        return self._analyze_results(all_results)

    def _analyze_results(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Rank results by mean RMSE, build a summary, and persist everything.

        Raises:
            ValueError: If *results* is empty (nothing to analyze).
        """
        if not results:
            # Guard: sorted(...)[0] below would otherwise raise a bare IndexError.
            raise ValueError("没有可分析的调优结果")

        # Sort ascending by mean RMSE; failed evaluations (inf) sink to the end.
        sorted_results = sorted(results, key=lambda x: x['rmse']['mean'])
        best_result = sorted_results[0]

        # Only successful evaluations contribute to the aggregate statistics.
        rmse_scores = [r['rmse']['mean'] for r in results if r['rmse']['mean'] != float('inf')]

        if rmse_scores:
            score_statistics = {
                'rmse_mean': float(np.mean(rmse_scores)),
                'rmse_std': float(np.std(rmse_scores)),
                'rmse_min': float(np.min(rmse_scores)),
                'rmse_max': float(np.max(rmse_scores))
            }
        else:
            # Every evaluation failed; np.min([]) would raise a ValueError.
            score_statistics = {'rmse_mean': None, 'rmse_std': None,
                                'rmse_min': None, 'rmse_max': None}

        analysis = {
            'model_type': self.model_type,
            'search_strategy': self.search_strategy,
            'total_evaluations': len(results),
            'successful_evaluations': len(rmse_scores),
            'best_params': best_result['params'],
            'best_score': {
                'rmse': best_result['rmse']['mean'],
                'rmse_std': best_result['rmse']['std'],
                'mae': best_result['mae']['mean'],
                'mae_std': best_result['mae']['std'],
                'r2': best_result['r2']['mean'],
                'r2_std': best_result['r2']['std']
            },
            'score_statistics': score_statistics,
            'top_10_results': sorted_results[:10]
        }

        # Persist JSON + CSV and update the model config on disk.
        self._save_results(analysis, sorted_results)

        return analysis

    def _save_results(self, analysis: Dict[str, Any], all_results: List[Dict[str, Any]]):
        """Persist the full results as JSON, a top-50 summary as CSV, and update the model config."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Full results (analysis + every evaluation) as JSON.
        results_file = os.path.join(self.output_dir, f"tuning_results_{timestamp}.json")
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump({
                'analysis': analysis,
                'all_results': all_results
            }, f, indent=2, ensure_ascii=False)

        # CSV summary of the best 50 results, one row per combination.
        summary_data = []
        for i, result in enumerate(all_results[:50]):
            row = {
                'rank': i + 1,
                'rmse_mean': result['rmse']['mean'],
                'rmse_std': result['rmse']['std'],
                'mae_mean': result['mae']['mean'],
                'r2_mean': result['r2']['mean']
            }
            row.update(result['params'])
            summary_data.append(row)

        summary_df = pd.DataFrame(summary_data)
        csv_file = os.path.join(self.output_dir, f"tuning_summary_{timestamp}.csv")
        # utf-8-sig so Excel opens the file with correct (CJK) encoding.
        summary_df.to_csv(csv_file, index=False, encoding='utf-8-sig')

        logger.info(f"调优结果已保存:")
        logger.info(f"  详细结果: {results_file}")
        logger.info(f"  摘要结果: {csv_file}")

        # Write the best parameters back into the model's config file.
        self._update_model_config(analysis['best_params'])

    def _update_model_config(self, best_params: Dict[str, Any]):
        """Merge *best_params* into <model_type>/model_config.json if that file exists."""
        config_path = os.path.join(os.path.dirname(__file__), self.model_type, 'model_config.json')

        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)

            # setdefault guards against configs that lack a 'model_params' key.
            config.setdefault('model_params', {}).update(best_params)

            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump(config, f, indent=2, ensure_ascii=False)

            logger.info(f"已更新模型配置文件: {config_path}")


async def main():
    """Entry point: tune every configured model concurrently and log a summary."""
    logger.info("开始超参数调优...")

    # One tuning job per model type.
    tuning_configs = [
        {'model_type': 'lasso', 'strategy': 'random', 'max_workers': 2},
        {'model_type': 'xgboost', 'strategy': 'random', 'max_workers': 3},
        {'model_type': 'lightgbm', 'strategy': 'random', 'max_workers': 3}
    ]

    # Build one task per config so all tuners run concurrently.
    tasks = [
        asyncio.create_task(
            HyperparameterTuner(
                model_type=cfg['model_type'],
                search_strategy=cfg['strategy'],
                max_workers=cfg['max_workers'],
            ).tune_parameters_async()
        )
        for cfg in tuning_configs
    ]

    # Wait for everything; failures come back as exception objects.
    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Summarize the outcome of every tuning task.
    logger.info("\n" + "="*80)
    logger.info("超参数调优结果汇总:")
    logger.info("="*80)

    for idx, outcome in enumerate(results):
        if isinstance(outcome, Exception):
            logger.error(f"调优任务 {idx+1} 失败: {outcome}")
            continue

        model_type = outcome['model_type']
        best_rmse = outcome['best_score']['rmse']
        best_r2 = outcome['best_score']['r2']

        logger.info(f"{model_type.upper():<10} 最佳RMSE: {best_rmse:.6f}, R2: {best_r2:.4f}")
        logger.info(f"  最佳参数: {outcome['best_params']}")
        logger.info("-" * 50)

    logger.info("超参数调优完成!")


if __name__ == "__main__":
    # 运行异步主函数
    asyncio.run(main())