# tools/sensitivity_analysis.py

import os
import numpy as np
import torch
import pandas as pd
import matplotlib.pyplot as plt
from typing import Dict, List, Union, Optional
import seaborn as sns
from mmengine.config import Config
from mmengine.runner import Runner
from mmengine.logging import MMLogger
import json
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import MaxNLocator


class SensitivityAnalyzer:
    """One-parameter-at-a-time sensitivity analysis for mmengine configs.

    For each parameter in ``param_ranges`` the analyzer trains/evaluates the
    base config once per candidate value, combines the detection metrics into
    a weighted score, and writes per-parameter plots, CSV tables and a
    JSON/Markdown report into ``work_dir``.
    """

    # Evaluator metric keys, mapped to the short names used both in
    # ``metric_weights`` and in the per-run result dicts.
    _METRIC_KEYS = {
        'overall_map': 'coco/bbox_mAP',
        'sparse_map': 'coco/bbox_mAP_sparse',
        'normal_map': 'coco/bbox_mAP_normal',
        'dense_map': 'coco/bbox_mAP_dense',
        'small_map': 'coco/bbox_mAP_s',
        'medium_map': 'coco/bbox_mAP_m',
        'large_map': 'coco/bbox_mAP_l',
    }

    def __init__(
            self,
            base_config_path: str,
            work_dir: str,
            param_ranges: Dict[str, Union[List[float], Dict[str, float]]],
            n_samples: int = 5,
            metric_weights: Optional[Dict[str, float]] = None,
            save_checkpoints: bool = False
    ):
        """Initialize the sensitivity analyzer.

        Args:
            base_config_path: Path to the base config file.
            work_dir: Working directory for logs, per-run outputs and reports.
            param_ranges: Mapping of (dotted) parameter name to either an
                explicit value list or a dict like
                ``{'min': 0.1, 'max': 0.9, 'step': 0.2}``.
            n_samples: Number of ``linspace`` samples generated when a dict
                range specifies no ``step``.
            metric_weights: Weights combining evaluation metrics into one
                score; normalized to sum to 1. Defaults to
                overall/sparse/normal/dense = 0.4/0.2/0.2/0.2.
            save_checkpoints: Whether to keep training checkpoints.
        """
        self.base_config = Config.fromfile(base_config_path)
        self.work_dir = work_dir
        os.makedirs(work_dir, exist_ok=True)

        # Create the logger first so the weight-normalization warning below
        # goes through the logger instead of a bare print().
        self.logger = MMLogger.get_instance(
            name='SensitivityAnalysis',
            log_file=os.path.join(work_dir, 'sensitivity_analysis.log')
        )

        # Expand every parameter range into an explicit list of values.
        self.param_ranges = {}
        for param_name, param_range in param_ranges.items():
            if isinstance(param_range, list):
                self.param_ranges[param_name] = param_range
            elif isinstance(param_range, dict):
                # Dict format: {'min': 0.1, 'max': 0.9, 'step': 0.2}
                start = param_range.get('min', 0)
                end = param_range.get('max', 1)
                step = param_range.get('step', None)
                if step:
                    # `end + step / 2` keeps the endpoint included despite
                    # floating-point round-off in np.arange.
                    self.param_ranges[param_name] = np.arange(start, end + step / 2, step).tolist()
                else:
                    self.param_ranges[param_name] = np.linspace(start, end, n_samples).tolist()
            else:
                raise ValueError(f"参数范围格式不支持: {param_range}")

        # Copy the user-supplied weights so the normalization below never
        # mutates the caller's dict in place.
        if metric_weights:
            self.metric_weights = dict(metric_weights)
        else:
            self.metric_weights = {
                'overall_map': 0.4,
                'sparse_map': 0.2,
                'normal_map': 0.2,
                'dense_map': 0.2
            }

        # Normalize so the weights always sum to 1.
        weight_sum = sum(self.metric_weights.values())
        if abs(weight_sum - 1.0) > 1e-6:
            self.logger.warning(f"警告: 权重总和 ({weight_sum}) 不等于1.0，将进行归一化")
            for key in self.metric_weights:
                self.metric_weights[key] /= weight_sum

        self.save_checkpoints = save_checkpoints
        self.n_samples = n_samples

        # {param_name: [per-value result dicts]}
        self.sensitivity_results = {}

        self.logger.info(f"初始化敏感性分析器，基础配置: {base_config_path}")
        self.logger.info(f"参数范围: {json.dumps(self.param_ranges, indent=2)}")
        self.logger.info(f"评估指标权重: {json.dumps(self.metric_weights, indent=2)}")

    @staticmethod
    def _cv(arr: np.ndarray) -> float:
        """Coefficient of variation (std / mean), or 0 when the mean is not
        positive — used as the sensitivity score throughout."""
        mean = arr.mean()
        return float(arr.std() / mean) if mean > 0 else 0

    def analyze_parameter_sensitivity(self):
        """Run the full analysis: one training/evaluation per parameter value,
        then per-parameter and cross-parameter visualization."""
        results_dir = os.path.join(self.work_dir, 'sensitivity_analysis')
        os.makedirs(results_dir, exist_ok=True)

        for param_name, param_range in self.param_ranges.items():
            self.logger.info(f"分析参数: {param_name}, 范围: {param_range}")
            self.sensitivity_results[param_name] = []
            # Dots are invalid in directory/file names on some tooling.
            safe_name = param_name.replace('.', '_')

            for value in param_range:
                # Clone the base config and inject the candidate value.
                config = self.base_config.copy()
                self._update_config_param(config, param_name, value)

                # Give every run its own working directory.
                param_dir = os.path.join(self.work_dir, f"{safe_name}_{value}")
                config.work_dir = param_dir
                os.makedirs(param_dir, exist_ok=True)

                # Effectively disable checkpoint saving when not wanted.
                if not self.save_checkpoints:
                    config.default_hooks.checkpoint.interval = 999999

                try:
                    self.logger.info(f"评估 {param_name}={value}")
                    runner = Runner.from_cfg(config)
                    runner.train()
                    # NOTE(review): reading computed results off
                    # `runner.val_evaluator.metrics` assumes the evaluator
                    # caches a results dict after validation — confirm
                    # against the mmengine version in use.
                    metrics = runner.val_evaluator.metrics

                    metrics_dict = metrics[0] if isinstance(metrics, list) else metrics

                    # Pull every tracked metric, defaulting to 0.0.
                    metric_values = {
                        name: metrics_dict.get(key, 0.0)
                        for name, key in self._METRIC_KEYS.items()
                    }

                    # Weighted composite score. Missing weights contribute
                    # 0.0 — the previous hard-coded fallbacks (0.4/0.2/...)
                    # could silently re-introduce weight for metrics a user
                    # deliberately left out of a normalized weight dict.
                    score = sum(
                        self.metric_weights.get(name, 0.0) * metric_value
                        for name, metric_value in metric_values.items()
                    )

                    result = {'value': value, 'score': score, **metric_values}

                    self.sensitivity_results[param_name].append(result)
                    self.logger.info(f"结果: {result}")

                    # Persist the single-run result next to its outputs.
                    with open(os.path.join(param_dir, 'result.json'), 'w', encoding='utf-8') as f:
                        json.dump(result, f, indent=2)

                except Exception as e:
                    # Best-effort: a failed run is logged and skipped so the
                    # remaining parameter values still get evaluated.
                    self.logger.error(f"评估 {param_name}={value} 失败: {str(e)}")

                # Release GPU memory between runs.
                torch.cuda.empty_cache()

            # Persist all results for this parameter.
            param_results_file = os.path.join(results_dir, f"{safe_name}_results.json")
            with open(param_results_file, 'w', encoding='utf-8') as f:
                json.dump(self.sensitivity_results[param_name], f, indent=2)

            # Per-parameter curves and CSV table.
            self._visualize_parameter_sensitivity(param_name, results_dir)

        # Cross-parameter heatmaps, bar plot and report.
        self.visualize_sensitivity(results_dir)

    def _update_config_param(self, config: "Config", param_name: str, value: float):
        """Set a possibly-nested config attribute addressed by a dotted path
        such as ``'model.bbox_head.loss_cls.loss_weight'``."""
        self._update_nested_config(config, param_name.split('.'), value)

    def _update_nested_config(self, config_obj, path_parts, value):
        """Recursively walk ``path_parts`` and set the final attribute."""
        if len(path_parts) == 1:
            # Last segment: assign the value.
            setattr(config_obj, path_parts[0], value)
            return

        # Descend one level and recurse on the remaining path.
        next_obj = getattr(config_obj, path_parts[0])
        self._update_nested_config(next_obj, path_parts[1:], value)

    def _visualize_parameter_sensitivity(self, param_name: str, results_dir: str):
        """Plot score/mAP curves for one parameter and dump a CSV table."""
        results = self.sensitivity_results[param_name]
        if not results:
            self.logger.warning(f"参数 {param_name} 没有有效结果，跳过可视化")
            return

        # Extract the series to plot.
        values = [r['value'] for r in results]
        scores = [r['score'] for r in results]
        overall_maps = [r['overall_map'] for r in results]
        sparse_maps = [r['sparse_map'] for r in results]
        normal_maps = [r['normal_map'] for r in results]
        dense_maps = [r['dense_map'] for r in results]

        # Multi-panel figure: score, overall mAP, density-binned mAP.
        fig = plt.figure(figsize=(15, 10))
        gs = GridSpec(2, 2, figure=fig)

        # 1. Composite-score curve with the best point highlighted.
        ax1 = fig.add_subplot(gs[0, 0])
        ax1.plot(values, scores, 'o-', color='blue', linewidth=2)
        ax1.set_xlabel(f'参数值: {param_name}')
        ax1.set_ylabel('综合得分')
        ax1.set_title(f'参数 {param_name} 对综合得分的影响')
        ax1.grid(True, linestyle='--', alpha=0.7)
        best_idx = scores.index(max(scores))
        ax1.plot(values[best_idx], scores[best_idx], 'ro', markersize=10)
        ax1.annotate(f'最佳: {values[best_idx]:.4f}',
                     xy=(values[best_idx], scores[best_idx]),
                     xytext=(0, 10),
                     textcoords='offset points',
                     ha='center')

        # 2. Overall mAP curve.
        ax2 = fig.add_subplot(gs[0, 1])
        ax2.plot(values, overall_maps, 'o-', color='green', linewidth=2)
        ax2.set_xlabel(f'参数值: {param_name}')
        ax2.set_ylabel('整体mAP')
        ax2.set_title(f'参数 {param_name} 对整体mAP的影响')
        ax2.grid(True, linestyle='--', alpha=0.7)

        # 3. Density-binned mAP curves (sparse / normal / dense scenes).
        ax3 = fig.add_subplot(gs[1, :])
        ax3.plot(values, sparse_maps, 'o-', label='稀疏场景', linewidth=2)
        ax3.plot(values, normal_maps, 'o-', label='普通场景', linewidth=2)
        ax3.plot(values, dense_maps, 'o-', label='密集场景', linewidth=2)
        ax3.set_xlabel(f'参数值: {param_name}')
        ax3.set_ylabel('密度分级mAP')
        ax3.set_title(f'参数 {param_name} 对不同密度场景的影响')
        ax3.legend()
        ax3.grid(True, linestyle='--', alpha=0.7)

        plt.tight_layout()
        plt.savefig(os.path.join(results_dir, f"{param_name.replace('.', '_')}_analysis.png"))
        plt.close()

        # Raw numbers alongside the plots.
        df = pd.DataFrame(results)
        df.to_csv(os.path.join(results_dir, f"{param_name.replace('.', '_')}_data.csv"), index=False)

    def visualize_sensitivity(self, results_dir: str):
        """Visualize all collected results: sensitivity heatmaps, a ranking
        bar plot, and the final report."""
        if not self.sensitivity_results:
            self.logger.warning("没有敏感性分析结果，跳过可视化")
            return

        # 1. Per-parameter sensitivity scores (coefficient of variation).
        sensitivity_scores = {}
        density_sensitivity = {}

        for param_name, results in self.sensitivity_results.items():
            if not results:
                continue

            scores = np.array([r['score'] for r in results])
            sparse_maps = np.array([r['sparse_map'] for r in results])
            normal_maps = np.array([r['normal_map'] for r in results])
            dense_maps = np.array([r['dense_map'] for r in results])

            sensitivity_scores[param_name] = self._cv(scores)

            # Sensitivity per scene density.
            density_sensitivity[param_name] = {
                'sparse': self._cv(sparse_maps),
                'normal': self._cv(normal_maps),
                'dense': self._cv(dense_maps)
            }

        # 2. Overall sensitivity heatmap.
        if sensitivity_scores:
            plt.figure(figsize=(10, 6))
            sensitivity_df = pd.DataFrame([sensitivity_scores]).T
            sensitivity_df.columns = ['敏感性得分']
            sensitivity_df = sensitivity_df.sort_values('敏感性得分', ascending=False)

            sns.heatmap(
                sensitivity_df,
                annot=True,
                cmap='YlOrRd',
                fmt='.3f',
                cbar_kws={'label': '敏感性得分 (标准差/均值)'}
            )
            plt.title('参数敏感性得分')
            plt.tight_layout()
            plt.savefig(os.path.join(results_dir, 'sensitivity_heatmap.png'))
            plt.close()

            sensitivity_df.to_csv(os.path.join(results_dir, 'sensitivity_scores.csv'))

        # 3. Density-binned sensitivity heatmap.
        if density_sensitivity:
            density_df = pd.DataFrame({
                param: [data['sparse'], data['normal'], data['dense']]
                for param, data in density_sensitivity.items()
            }, index=['稀疏场景', '普通场景', '密集场景'])

            plt.figure(figsize=(12, 8))
            sns.heatmap(
                density_df,
                annot=True,
                cmap='YlGnBu',
                fmt='.3f',
                cbar_kws={'label': '敏感性得分 (标准差/均值)'}
            )
            plt.title('参数对不同密度场景的敏感性')
            plt.tight_layout()
            plt.savefig(os.path.join(results_dir, 'density_sensitivity_heatmap.png'))
            plt.close()

            density_df.to_csv(os.path.join(results_dir, 'density_sensitivity_scores.csv'))

        # 4. Parameter-importance bar plot.
        if sensitivity_scores:
            plt.figure(figsize=(12, 8))
            sensitivity_series = pd.Series(sensitivity_scores).sort_values(ascending=False)
            bars = plt.bar(sensitivity_series.index, sensitivity_series.values, color='skyblue')

            # Numeric labels above each bar.
            for bar in bars:
                height = bar.get_height()
                plt.text(bar.get_x() + bar.get_width() / 2., height + 0.01,
                         f'{height:.3f}', ha='center', va='bottom')

            plt.xlabel('参数')
            plt.ylabel('敏感性得分')
            plt.title('参数敏感性排序')
            plt.xticks(rotation=45, ha='right')
            plt.grid(True, linestyle='--', alpha=0.3, axis='y')
            plt.tight_layout()
            plt.savefig(os.path.join(results_dir, 'sensitivity_barplot.png'))
            plt.close()

        # 5. Combined JSON + Markdown report.
        self._generate_sensitivity_report(results_dir)

    def _generate_sensitivity_report(self, results_dir: str):
        """Write the combined sensitivity report as JSON and Markdown."""
        report = {
            "参数敏感性分析报告": {
                "分析时间": pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S'),
                "参数范围": self.param_ranges,
                "评估指标权重": self.metric_weights,
                "参数建议": self.get_recommendations(),
                "详细结果": {}
            }
        }

        # Per-parameter details: best value, best score, sensitivity.
        for param_name, results in self.sensitivity_results.items():
            if not results:
                continue

            best_result = max(results, key=lambda x: x['score'])

            scores = np.array([r['score'] for r in results])
            sensitivity = self._cv(scores)

            report["参数敏感性分析报告"]["详细结果"][param_name] = {
                "最佳值": best_result['value'],
                "最佳得分": best_result['score'],
                "敏感性得分": sensitivity,
                "最佳性能指标": {
                    "整体mAP": best_result['overall_map'],
                    "稀疏场景mAP": best_result['sparse_map'],
                    "普通场景mAP": best_result['normal_map'],
                    "密集场景mAP": best_result['dense_map']
                }
            }

        # JSON report (keep non-ASCII keys readable).
        with open(os.path.join(results_dir, 'sensitivity_report.json'), 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        # Markdown report.
        with open(os.path.join(results_dir, 'sensitivity_report.md'), 'w', encoding='utf-8') as f:
            f.write("# 参数敏感性分析报告\n\n")
            f.write(f"分析时间: {report['参数敏感性分析报告']['分析时间']}\n\n")

            f.write("## 参数建议\n\n")
            f.write("| 参数 | 建议值 |\n")
            f.write("|------|-------|\n")
            for param, value in report['参数敏感性分析报告']['参数建议'].items():
                f.write(f"| {param} | {value} |\n")

            f.write("\n## 敏感性分析结果\n\n")
            f.write("| 参数 | 敏感性得分 | 最佳值 | 最佳得分 | 整体mAP | 稀疏mAP | 普通mAP | 密集mAP |\n")
            f.write("|------|-----------|--------|----------|---------|---------|---------|--------|\n")

            for param, details in report['参数敏感性分析报告']['详细结果'].items():
                f.write(
                    f"| {param} | {details['敏感性得分']:.4f} | {details['最佳值']} | {details['最佳得分']:.4f} | " +
                    f"{details['最佳性能指标']['整体mAP']:.4f} | {details['最佳性能指标']['稀疏场景mAP']:.4f} | " +
                    f"{details['最佳性能指标']['普通场景mAP']:.4f} | {details['最佳性能指标']['密集场景mAP']:.4f} |\n")

    def get_recommendations(self) -> Dict[str, float]:
        """Return, for each analyzed parameter, the value with the highest
        composite score."""
        recommendations = {}

        for param_name, results in self.sensitivity_results.items():
            if not results:
                continue

            best_result = max(results, key=lambda x: x['score'])
            recommendations[param_name] = best_result['value']

        return recommendations


# CLI entry point. Example:
#   python tools/sensitivity_analysis.py --config cfg.py --work-dir out \
#       --params params.json [--weights weights.json] [--save-checkpoints]
def main():
    """Parse CLI arguments, run the sensitivity analysis, and print the
    recommended value for each parameter."""
    import argparse

    parser = argparse.ArgumentParser(description='参数敏感性分析')
    parser.add_argument('--config', required=True, help='基础配置文件路径')
    parser.add_argument('--work-dir', required=True, help='工作目录')
    parser.add_argument('--params', required=True, help='参数配置JSON文件路径')
    parser.add_argument('--weights', default=None, help='评估指标权重JSON文件路径')
    parser.add_argument('--save-checkpoints', action='store_true', help='是否保存检查点')
    args = parser.parse_args()

    # Load the parameter-range spec. Explicit UTF-8 so JSON containing
    # non-ASCII text loads correctly regardless of the platform default.
    with open(args.params, 'r', encoding='utf-8') as f:
        param_ranges = json.load(f)

    # Optional metric-weight overrides.
    metric_weights = None
    if args.weights:
        with open(args.weights, 'r', encoding='utf-8') as f:
            metric_weights = json.load(f)

    analyzer = SensitivityAnalyzer(
        base_config_path=args.config,
        work_dir=args.work_dir,
        param_ranges=param_ranges,
        metric_weights=metric_weights,
        save_checkpoints=args.save_checkpoints
    )

    # Run the analysis, then report the best value per parameter.
    analyzer.analyze_parameter_sensitivity()

    recommendations = analyzer.get_recommendations()
    print("参数建议:")
    for param, value in recommendations.items():
        print(f"  {param}: {value}")


if __name__ == "__main__":
    main()
