import os
import sys

# Add the project root to the Python path so local packages resolve.
# The hard-coded machine-specific path is kept as the default, but it can
# now be overridden via the DIFFUSIONDET_ROOT environment variable so the
# script runs on other machines without editing the source.
ROOT_DIR = os.environ.get(
    'DIFFUSIONDET_ROOT',
    '/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet')
sys.path.insert(0, ROOT_DIR)

import optuna
import torch
import logging
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mmengine.config import Config
from mmengine.runner import Runner
from mmengine.logging import MMLogger
from mmdet.utils import register_all_modules


class HPOptimization:
    """Hyperparameter optimization driver for a DiffusionDet-style detector.

    Wraps an Optuna study around MMEngine training runs: each trial samples
    a hyperparameter set, patches the base config, trains the model, parses
    the resulting COCO-style mAP metrics from logs (with several fallbacks),
    and returns a weighted composite score to maximize.
    """

    def __init__(
            self,
            base_config_path: str,
            n_trials: int = 25,
            study_name: str = "diffusiondet_hpo",
            storage: str = "sqlite:///hpo.db",
            work_dir: str = "/work_dirs/Hyperparametric_sensitivity/hpo",
            objective_weights: dict = None,  # weights combining metrics into the objective score
            resume: bool = True  # reload an existing study from storage instead of starting over
    ):
        # Register all mmdet modules with the MMEngine registries so the
        # config can be instantiated.
        register_all_modules()

        print(f"正在加载配置文件: {base_config_path}")
        self.base_config = Config.fromfile(base_config_path)
        self.n_trials = n_trials
        self.work_dir = work_dir
        os.makedirs(work_dir, exist_ok=True)

        # Objective weights; the default favors overall mAP (0.4), splits the
        # rest equally across density levels, and disables size-level terms.
        self.objective_weights = objective_weights or {
            'overall_map': 0.4,  # weight of the overall mAP
            'density_weights': {  # per-density-level weights
                'sparse': 0.2,  # sparse-scene weight
                'normal': 0.2,  # normal-scene weight
                'dense': 0.2  # dense-scene weight
            },
            'size_weights': {  # per-object-size weights
                'small': 0.0,  # small-object weight
                'medium': 0.0,  # medium-object weight
                'large': 0.0  # large-object weight
            }
        }

        # Ensure all weights sum to 1.0; if not, rescale them in place.
        total_weight = (self.objective_weights['overall_map'] +
                        sum(self.objective_weights['density_weights'].values()) +
                        sum(self.objective_weights['size_weights'].values()))

        if abs(total_weight - 1.0) > 1e-6:
            print(f"警告: 权重总和 ({total_weight}) 不等于1.0，将进行归一化")
            scale_factor = 1.0 / total_weight
            self.objective_weights['overall_map'] *= scale_factor
            for k in self.objective_weights['density_weights']:
                self.objective_weights['density_weights'][k] *= scale_factor
            for k in self.objective_weights['size_weights']:
                self.objective_weights['size_weights'][k] *= scale_factor

        # Create (or, with resume=True, reload) the Optuna study backed by
        # `storage`; the fixed TPE seed makes sampling reproducible.
        self.study = optuna.create_study(
            study_name=study_name,
            storage=storage,
            direction="maximize",  # maximize the composite score
            sampler=optuna.samplers.TPESampler(seed=42),
            load_if_exists=resume
        )

        # Count already-finished trials so a resumed run only performs the
        # remaining ones.
        completed_trials = len([t for t in self.study.trials if t.state == optuna.trial.TrialState.COMPLETE])
        remaining_trials = max(0, n_trials - completed_trials)

        print(f"已完成的试验数: {completed_trials}")
        print(f"剩余的试验数: {remaining_trials}")
        self.remaining_trials = remaining_trials

        # Dedicated MMLogger writing to <work_dir>/hpo.log.
        self.logger = MMLogger.get_instance(
            name='HPO',
            log_file=os.path.join(work_dir, 'hpo.log')
        )

        # Record the (possibly normalized) objective weights for reproducibility.
        self.logger.info(f"目标函数权重设置: {json.dumps(self.objective_weights, indent=2)}")

        # Inspect the config structure to confirm the tunable sub-modules exist.
        self._analyze_config()

    def _analyze_config(self):
        """Log the config structure to identify the tunable sub-modules."""
        self.logger.info("分析配置结构...")

        # Dump the ADEM head parameters, if the config defines them.
        if hasattr(self.base_config.model.bbox_head, 'adem_head'):
            self.logger.info("ADEM头部参数:")
            for key, value in self.base_config.model.bbox_head.adem_head.items():
                self.logger.info(f"  {key}: {value}")
        else:
            self.logger.warning("模型没有adem_head配置")

        # Dump the small-object distiller parameters, if the config defines them.
        if hasattr(self.base_config.model.bbox_head, 'small_object_distiller'):
            self.logger.info("小物体蒸馏器参数:")
            for key, value in self.base_config.model.bbox_head.small_object_distiller.items():
                self.logger.info(f"  {key}: {value}")
        else:
            self.logger.warning("模型没有small_object_distiller配置")

    def suggest_parameters(self, trial: optuna.Trial) -> dict:
        """Define the search space, tailored to the DiffusionDet model structure.

        Returns a dict of hyperparameters sampled for this trial; the keys
        must match what update_config() writes into the config.
        """
        params = {
            # 1. LCM module parameters
            'lcm_guidance_scale': trial.suggest_float('lcm_guidance_scale', 0.5, 2.0),

            # 2. ADEM module parameters -- names must match the config keys
            'adem_loss_weight': trial.suggest_float('adem_loss_weight', 0.1, 0.7),
            'adem_min_sigma': trial.suggest_float('adem_min_sigma', 0.01, 0.1),
            'adem_max_sigma': trial.suggest_float('adem_max_sigma', 1.0, 3.0),

            # 3. Dynamic DDIM parameters
            'dynamic_steps': trial.suggest_int('dynamic_steps', 10, 25, step=5),
            'complexity_thresh': trial.suggest_float('complexity_thresh', 0.3, 0.7),

            # 4. Distillation parameters
            'base_temp': trial.suggest_float('base_temp', 1.0, 2.5, step=0.5),
            'small_temp': trial.suggest_float('small_temp', 0.5, 1.2, step=0.1),
            'small_weight': trial.suggest_float('small_weight', 0.5, 0.9, step=0.1),
            'distill_weight': trial.suggest_float('distill_weight', 0.2, 0.5, step=0.1),

            # 5. Basic training parameters (log-uniform for lr / weight decay)
            'learning_rate': trial.suggest_float('learning_rate', 5e-6, 5e-5, log=True),
            'weight_decay': trial.suggest_float('weight_decay', 1e-5, 1e-3, log=True),
        }
        return params

    def update_config(self, config: Config, params: dict) -> Config:
        """Write the sampled parameters into a copy of the base config.

        Raises AttributeError (after logging) when the config structure does
        not match the expected DiffusionDet layout.
        """
        try:
            # 1. LCM-related parameters
            config.model.bbox_head.lcm_guidance_scale = params['lcm_guidance_scale']

            # 2. ADEM-related parameters -- names must match the config keys
            config.model.bbox_head.adem_loss_weight = params['adem_loss_weight']
            config.model.bbox_head.adem_head.min_sigma = params['adem_min_sigma']
            config.model.bbox_head.adem_head.max_sigma = params['adem_max_sigma']

            # 3. Dynamic DDIM parameters
            config.model.bbox_head.dynamic_steps = params['dynamic_steps']
            config.model.bbox_head.complexity_thresh = params['complexity_thresh']

            # 4. Distillation parameters
            config.model.bbox_head.small_object_distiller.base_temp = params['base_temp']
            config.model.bbox_head.small_object_distiller.small_temp = params['small_temp']
            config.model.bbox_head.small_object_distiller.small_weight = params['small_weight']
            config.model.bbox_head.distill_weight = params['distill_weight']

            # 5. Basic training parameters
            config.optim_wrapper.optimizer.lr = params['learning_rate']
            config.optim_wrapper.optimizer.weight_decay = params['weight_decay']

            # 6. GPU-memory / disk savings:
            # validate only every 2 epochs
            config.train_cfg.val_interval = 2

            # save checkpoints only every 2 epochs
            config.default_hooks.checkpoint.interval = 2

            return config
        except AttributeError as e:
            self.logger.error(f"配置更新失败: {str(e)}")
            self.logger.error(f"请检查配置文件结构是否与代码匹配")
            raise

    def objective(self, trial: optuna.Trial) -> float:
        """Optuna objective: train once and return the weighted composite score.

        Metrics are recovered in several fallback stages: runner message hub,
        log-file regex parsing, plain text files, and finally checkpoint file
        names. Failed trials are reported as pruned.
        """
        # 1. Sample this trial's hyperparameters.
        params = self.suggest_parameters(trial)

        # 2. Apply them to a copy of the base config.
        config = self.base_config.copy()
        config = self.update_config(config, params)

        # 3. Give each trial its own work directory.
        trial_dir = os.path.join(self.work_dir, f'trial_{trial.number}')
        config.work_dir = trial_dir
        os.makedirs(trial_dir, exist_ok=True)

        try:
            # 4. Train the model.
            runner = Runner.from_cfg(config)
            runner.train()

            # 5. Parse the evaluation metrics from the training logs.
            import re
            import glob

            # Default every metric to 0.0 so missing values score as zero.
            overall_map = sparse_map = normal_map = dense_map = small_map = medium_map = large_map = 0.0

            # Locate the training log file. MMEngine typically creates a
            # timestamp-named log file somewhere under the work directory.
            log_files = glob.glob(os.path.join(trial_dir, '*.log'))

            # No .log directly under the trial dir: search subdirectories too.
            if not log_files:
                log_files = glob.glob(os.path.join(trial_dir, '**', '*.log'), recursive=True)

            # Still nothing: try looser filename patterns containing "log".
            if not log_files:
                # possible log-like file names
                log_files = glob.glob(os.path.join(trial_dir, '*log*'))
                log_files.extend(glob.glob(os.path.join(trial_dir, '**', '*log*'), recursive=True))

            # Log what was found to help debugging.
            self.logger.info(f"找到的日志文件: {log_files}")

            # Fallback A: no log files at all -- query the runner directly.
            if not log_files and hasattr(runner, 'val_evaluator') and runner.val_evaluator is not None:
                self.logger.info("未找到日志文件，尝试直接从runner获取评估结果")
                try:
                    # Evaluation results may live in runner.message_hub.
                    if hasattr(runner, 'message_hub'):
                        message_keys = runner.message_hub.get_all_keys()
                        self.logger.info(f"可用的消息键: {message_keys}")

                        # Pick out every bbox_mAP-style metric.
                        for key in message_keys:
                            if 'bbox_mAP' in key:
                                value = runner.message_hub.get_info(key)
                                self.logger.info(f"找到指标 {key}: {value}")

                                if key == 'coco/bbox_mAP' or key == 'bbox_mAP':
                                    overall_map = float(value)
                                elif key == 'coco/bbox_mAP_s' or key == 'bbox_mAP_s':
                                    small_map = float(value)
                                elif key == 'coco/bbox_mAP_m' or key == 'bbox_mAP_m':
                                    medium_map = float(value)
                                elif key == 'coco/bbox_mAP_l' or key == 'bbox_mAP_l':
                                    large_map = float(value)
                                elif key == 'coco/bbox_mAP_sparse' or key == 'bbox_mAP_sparse':
                                    sparse_map = float(value)
                                elif key == 'coco/bbox_mAP_normal' or key == 'bbox_mAP_normal':
                                    normal_map = float(value)
                                elif key == 'coco/bbox_mAP_dense' or key == 'bbox_mAP_dense':
                                    dense_map = float(value)
                except Exception as e:
                    self.logger.warning(f"从runner获取评估结果时出错: {str(e)}")

            # Fallback B: parse the log files with regexes.
            if overall_map == 0.0 and log_files:
                # Regexes for each metric emitted by the COCO evaluator.
                map_pattern = r'coco/bbox_mAP: (\d+\.\d+)'
                map_s_pattern = r'coco/bbox_mAP_s: (\d+\.\d+)'
                map_m_pattern = r'coco/bbox_mAP_m: (\d+\.\d+)'
                map_l_pattern = r'coco/bbox_mAP_l: (\d+\.\d+)'
                map_sparse_pattern = r'coco/bbox_mAP_sparse: (\d+\.\d+)'
                map_normal_pattern = r'coco/bbox_mAP_normal: (\d+\.\d+)'
                map_dense_pattern = r'coco/bbox_mAP_dense: (\d+\.\d+)'

                # Read each log file and extract the metrics.
                for log_file in log_files:
                    try:
                        with open(log_file, 'r') as f:
                            log_content = f.read()

                            # Find all validation summary lines.
                            eval_lines = re.findall(r'Epoch\(val\).*coco/bbox_mAP:.*', log_content)
                            if eval_lines:
                                # Use the last (most recent) validation result.
                                last_eval = eval_lines[-1]
                                self.logger.info(f"找到评估行: {last_eval}")

                                # Extract each metric individually.
                                map_match = re.search(map_pattern, last_eval)
                                if map_match:
                                    overall_map = float(map_match.group(1))

                                map_s_match = re.search(map_s_pattern, last_eval)
                                if map_s_match:
                                    small_map = float(map_s_match.group(1))

                                map_m_match = re.search(map_m_pattern, last_eval)
                                if map_m_match:
                                    medium_map = float(map_m_match.group(1))

                                map_l_match = re.search(map_l_pattern, last_eval)
                                if map_l_match:
                                    large_map = float(map_l_match.group(1))

                                map_sparse_match = re.search(map_sparse_pattern, last_eval)
                                if map_sparse_match:
                                    sparse_map = float(map_sparse_match.group(1))

                                map_normal_match = re.search(map_normal_pattern, last_eval)
                                if map_normal_match:
                                    normal_map = float(map_normal_match.group(1))

                                map_dense_match = re.search(map_dense_pattern, last_eval)
                                if map_dense_match:
                                    dense_map = float(map_dense_match.group(1))

                                # Metrics found -- stop scanning further files.
                                break

                            # No validation line: fall back to the
                            # "best checkpoint" announcement line for mAP.
                            if overall_map == 0.0:
                                best_ckpt_lines = re.findall(r'The best checkpoint with (\d+\.\d+) coco/bbox_mAP',
                                                             log_content)
                                if best_ckpt_lines:
                                    # Use the most recent best-checkpoint mAP.
                                    overall_map = float(best_ckpt_lines[-1])
                                    self.logger.info(f"从最佳检查点行提取到整体mAP: {overall_map}")
                    except Exception as e:
                        self.logger.warning(f"读取日志文件 {log_file} 时出错: {str(e)}")

            # Fallback C: scan any .txt file in the work dir for mAP-like lines
            # (logged for manual inspection only; does not set the metrics).
            if overall_map == 0.0:
                self.logger.info("尝试在工作目录下查找任何可能包含评估结果的文本文件")
                text_files = glob.glob(os.path.join(trial_dir, '**', '*.txt'), recursive=True)
                text_files.extend(glob.glob(os.path.join(trial_dir, '*.txt')))

                for text_file in text_files:
                    try:
                        with open(text_file, 'r') as f:
                            content = f.read()
                            # Report lines that mention mAP with a number.
                            map_lines = re.findall(r'.*mAP.*\d+\.\d+.*', content)
                            if map_lines:
                                self.logger.info(f"在文件 {text_file} 中找到可能的mAP行: {map_lines[-1]}")
                    except Exception:
                        pass

            # Fallback D: parse the mAP value embedded in checkpoint filenames.
            if overall_map == 0.0:
                ckpt_files = glob.glob(os.path.join(trial_dir, '*bbox_mAP*.pth'))
                for ckpt_file in ckpt_files:
                    try:
                        # e.g. "best_coco_bbox_mAP_0.123_epoch_4.pth"
                        map_match = re.search(r'bbox_mAP_(\d+\.\d+)', ckpt_file)
                        if map_match:
                            overall_map = float(map_match.group(1))
                            self.logger.info(f"从检查点文件名 {ckpt_file} 提取到整体mAP: {overall_map}")
                            break
                    except Exception:
                        pass

            # Log the extracted metrics for debugging.
            self.logger.info(f"从日志中提取的指标:")
            self.logger.info(f"  整体mAP: {overall_map}")
            self.logger.info(f"  密度分级mAP: 稀疏={sparse_map}, 普通={normal_map}, 密集={dense_map}")
            self.logger.info(f"  尺寸分级mAP: 小={small_map}, 中={medium_map}, 大={large_map}")

            # 6. Compute the weighted composite score.
            weights = self.objective_weights

            # overall-mAP term
            score = weights['overall_map'] * overall_map

            # density-level terms
            score += weights['density_weights'].get('sparse', 0.0) * sparse_map
            score += weights['density_weights'].get('normal', 0.0) * normal_map
            score += weights['density_weights'].get('dense', 0.0) * dense_map

            # size-level terms
            score += weights['size_weights'].get('small', 0.0) * small_map
            score += weights['size_weights'].get('medium', 0.0) * medium_map
            score += weights['size_weights'].get('large', 0.0) * large_map

            # 7. Log the detailed trial outcome.
            self.logger.info(f"Trial {trial.number} 完成:")
            self.logger.info(f"  参数: {params}")
            self.logger.info(f"  整体mAP: {overall_map:.4f}")
            self.logger.info(f"  密度分级mAP: 稀疏={sparse_map:.4f}, 普通={normal_map:.4f}, 密集={dense_map:.4f}")
            self.logger.info(f"  尺寸分级mAP: 小={small_map:.4f}, 中={medium_map:.4f}, 大={large_map:.4f}")
            self.logger.info(f"  综合得分: {score:.4f}")

            # 8. Persist the result for save_optimization_history().
            result = {
                "trial_number": trial.number,
                "parameters": params,
                "metrics": {
                    "overall_map": float(overall_map),
                    "sparse_map": float(sparse_map),
                    "normal_map": float(normal_map),
                    "dense_map": float(dense_map),
                    "small_map": float(small_map),
                    "medium_map": float(medium_map),
                    "large_map": float(large_map)
                },
                "score": float(score)
            }

            with open(os.path.join(trial_dir, 'results.json'), 'w') as f:
                json.dump(result, f, indent=2)

            # 9. Free cached GPU memory before the next trial.
            torch.cuda.empty_cache()

            return score

        except Exception as e:
            self.logger.error(f"Trial {trial.number} 失败: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())  # full stack trace to the log

            # Persist the failure details alongside the trial artifacts.
            with open(os.path.join(trial_dir, 'error.txt'), 'w') as f:
                f.write(f"错误: {str(e)}\n")
                f.write(f"参数: {params}\n")
                f.write(traceback.format_exc())  # full stack trace to the file

            # Free cached GPU memory even on failure.
            torch.cuda.empty_cache()
            raise optuna.TrialPruned()

    def run_optimization(self):
        """Run the remaining trials, then report and persist the results."""
        if self.remaining_trials <= 0:
            self.logger.info("所有试验已完成，无需继续优化。")
            best_trial = self.study.best_trial
            self.logger.info(f"最佳试验: {best_trial.number}, 得分: {best_trial.value:.4f}")
            self.save_optimization_history()
            return

        self.logger.info(f"开始超参数优化，将运行{self.remaining_trials}个试验...")
        self.study.optimize(self.objective, n_trials=self.remaining_trials)

        # Report the best trial (best_trial raises ValueError when no trial
        # completed successfully).
        try:
            best_trial = self.study.best_trial
            self.logger.info(f"\n最佳试验:")
            self.logger.info(f"  得分: {best_trial.value:.4f}")
            self.logger.info(f"  参数: ")
            for key, value in best_trial.params.items():
                self.logger.info(f"    {key}: {value}")
        except ValueError:
            self.logger.error("没有成功完成的trial，无法获取最佳结果")

        # Persist history, plots, and the best config.
        self.save_optimization_history()

    def save_optimization_history(self):
        """Save trial history, plots, and the best config under <work_dir>/analysis."""
        # Directory for all analysis artifacts.
        results_dir = os.path.join(self.work_dir, 'analysis')
        os.makedirs(results_dir, exist_ok=True)

        try:
            # Full trial history as CSV.
            trials_df = self.study.trials_dataframe()
            trials_df.to_csv(os.path.join(results_dir, 'trials_history.csv'))

            # Collect the per-trial results.json files of completed trials.
            detailed_results = []
            for trial in self.study.trials:
                if trial.state == optuna.trial.TrialState.COMPLETE:
                    trial_dir = os.path.join(self.work_dir, f'trial_{trial.number}')
                    result_file = os.path.join(trial_dir, 'results.json')
                    if os.path.exists(result_file):
                        with open(result_file, 'r') as f:
                            detailed_results.append(json.load(f))

            # Persist the aggregated detailed results.
            if detailed_results:
                with open(os.path.join(results_dir, 'detailed_results.json'), 'w') as f:
                    json.dump(detailed_results, f, indent=2)

                # Flatten metrics + parameters into one row per trial.
                metrics_data = []
                for result in detailed_results:
                    data = {
                        'trial': result['trial_number'],
                        'score': result['score'],
                        **result['metrics'],
                        **result['parameters']
                    }
                    metrics_data.append(data)

                metrics_df = pd.DataFrame(metrics_data)
                metrics_df.to_csv(os.path.join(results_dir, 'detailed_metrics.csv'))

            # Plot score vs. trial number.
            plt.figure(figsize=(10, 6))
            completed_trials = trials_df[~trials_df['value'].isnull()]

            if not completed_trials.empty:
                plt.plot(completed_trials['number'], completed_trials['value'], 'bo-')
                plt.xlabel('试验编号')
                plt.ylabel('综合得分')
                plt.title('优化历史')
                plt.grid(True, linestyle='--', alpha=0.7)
                plt.savefig(os.path.join(results_dir, 'optimization_history.png'))
            else:
                self.logger.warning("没有成功完成的trial，无法绘制优化历史")

            plt.close()

            # Radar chart of the best trial's metrics.
            if detailed_results and len(detailed_results) >= 1:
                # Locate the best trial by score.
                best_trial_idx = trials_df['value'].idxmax() if not completed_trials.empty else None
                if best_trial_idx is not None:
                    best_trial_num = int(trials_df.loc[best_trial_idx, 'number'])
                    best_result = None
                    for result in detailed_results:
                        if result['trial_number'] == best_trial_num:
                            best_result = result
                            break

                    if best_result:
                        # Build the radar chart of its metrics.
                        metrics = best_result['metrics']
                        categories = ['整体mAP', '稀疏mAP', '普通mAP', '密集mAP', '小目标mAP', '中目标mAP', '大目标mAP']
                        values = [
                            metrics['overall_map'],
                            metrics['sparse_map'],
                            metrics['normal_map'],
                            metrics['dense_map'],
                            metrics['small_map'],
                            metrics['medium_map'],
                            metrics['large_map']
                        ]

                        # Polar axes for the radar chart.
                        plt.figure(figsize=(10, 8))
                        ax = plt.subplot(111, polar=True)

                        # Evenly spaced angles, one per category.
                        N = len(categories)
                        angles = [n / float(N) * 2 * np.pi for n in range(N)]
                        angles += angles[:1]  # close the polygon

                        # Plot and fill the data polygon.
                        values += values[:1]  # close the data loop
                        ax.plot(angles, values, 'o-', linewidth=2)
                        ax.fill(angles, values, alpha=0.25)

                        # Category labels (set_thetagrids takes degrees).
                        ax.set_thetagrids(np.array(angles[:-1]) * 180 / np.pi, categories)
                        plt.title(f'最佳试验 #{best_trial_num} 的性能指标')

                        # Grid lines plus a numeric label at each vertex.
                        ax.grid(True)
                        for i, value in enumerate(values[:-1]):
                            ax.annotate(f'{value:.3f}',
                                        xy=(angles[i], value),
                                        xytext=(angles[i], value + 0.05),
                                        ha='center')

                        plt.tight_layout()
                        plt.savefig(os.path.join(results_dir, 'best_trial_radar.png'))
                        plt.close()

            # Parameter-importance bar chart (needs >= 2 completed trials).
            try:
                if not completed_trials.empty and len(completed_trials) >= 2:
                    param_importances = optuna.importance.get_param_importances(self.study)
                    importance_df = pd.DataFrame(list(param_importances.items()),
                                                 columns=['参数', '重要性'])

                    plt.figure(figsize=(12, 6))
                    bars = plt.bar(importance_df['参数'], importance_df['重要性'])
                    plt.xticks(rotation=45)
                    plt.title('参数重要性分析')
                    plt.xlabel('参数')
                    plt.ylabel('重要性')
                    plt.grid(True, linestyle='--', alpha=0.3, axis='y')

                    # Numeric label above each bar.
                    for bar in bars:
                        height = bar.get_height()
                        plt.text(bar.get_x() + bar.get_width() / 2., height + 0.01,
                                 f'{height:.3f}', ha='center', va='bottom')

                    plt.tight_layout()
                    plt.savefig(os.path.join(results_dir, 'parameter_importance.png'))
                else:
                    self.logger.warning("成功完成的trial不足，无法计算参数重要性")

                plt.close()
            except Exception as e:
                self.logger.error(f"无法计算参数重要性: {str(e)}")

            # Dump a ready-to-use config file with the best parameters applied.
            if not completed_trials.empty:
                best_trial = self.study.best_trial
                best_params = best_trial.params
                best_config = self.base_config.copy()
                best_config = self.update_config(best_config, best_params)

                # Save the best config alongside the analysis artifacts.
                best_config_file = os.path.join(results_dir, 'best_config.py')
                best_config.dump(best_config_file)
                self.logger.info(f"最佳配置已保存到: {best_config_file}")

        except Exception as e:
            self.logger.error(f"保存优化历史时出错: {str(e)}")
