#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# 在导入任何库之前，先设置CUDA_VISIBLE_DEVICES环境变量
import os
import sys

# 读取配置文件
# Resolve the config file that sits next to this script.
config_path = os.path.join(os.path.dirname(__file__), 'multi_turn_config.json')
try:
    import json
    # Pull the GPU id (default: device 0) and publish it before any
    # CUDA-aware library gets imported.
    with open(config_path, encoding='utf-8') as f:
        gpu_device_id = json.load(f).get('gpu_device_id', 0)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_device_id)
    print(f"已设置环境变量CUDA_VISIBLE_DEVICES={gpu_device_id}")
except Exception as e:
    # Any failure (missing file, malformed JSON, ...) falls back to GPU 0.
    print(f"读取配置文件或设置环境变量失败: {str(e)}")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# 现在导入其他必要的库
import json
import os
import logging
from typing import List, Dict, Any, Optional
import matplotlib.pyplot as plt
import numpy as np

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class MultiTurnExperimentManager:
    """
    多轮对话实验管理器，负责组织和执行多轮对话实验
    """
    
    def __init__(self, config: Dict[str, Any], experiment_manager=None):
        """Initialize the multi-turn dialogue experiment manager.

        Args:
            config: experiment configuration parameters (a plain dict).
            experiment_manager: an existing experiment manager to reuse (optional).
        """
        self.config = config
        # Reuse functionality from an existing ExperimentManager when provided.
        self.experiment_manager = experiment_manager

        # Settings specific to the multi-turn experiment.
        self.initial_history_turns = config.get('initial_history_turns', 2)
        self.max_experiment_turns = config.get('max_experiment_turns', 5)
        self.context_window_size = config.get('context_window_size', 'full')

        # Make sure CJK glyphs render correctly in matplotlib figures
        # (plt is already imported at module level).
        plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

        # Directory where result files are written.
        self.results_dir = config.get('results_dir', '/mnt/ssd/jsj/patient/results/multi_turn')
        os.makedirs(self.results_dir, exist_ok=True)

        # Bring up the model handler (project-local modules).
        from model_handler import ModelHandler
        from config import Config

        # Propagate the configured GPU device id (default 0) into the shared Config.
        device_id = self.config.get('gpu_device_id', 0)
        Config.GPU_DEVICE_ID = device_id

        self.model_handler = ModelHandler(Config)
        try:
            self.model_handler.load_model(Config.MODEL_PATH)
            logger.info(f"模型加载成功，多轮对话实验将使用真实模型生成回答 (使用GPU设备 {device_id})")
        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            # Fall back to mock answers when the model cannot be loaded.
            self.model_handler = None
    
    def load_patient_data(self, patient_file_path: str) -> Optional[Dict[str, Any]]:
        """
        加载患者数据文件
        
        Args:
            patient_file_path: 患者数据文件路径
        
        Returns:
            患者数据字典，如果文件不存在或加载失败则返回None
        """
        try:
            # 确保文件存在
            if not os.path.exists(patient_file_path):
                logger.error(f"患者数据文件不存在: {patient_file_path}")
                return None
            
            with open(patient_file_path, 'r', encoding='utf-8') as f:
                patient_data = json.load(f)
            
            # 数据格式转换，确保与现有代码兼容
            formatted_data = {
                '患者基础信息': patient_data.get('患者基础信息', patient_data.get('基础信息', {})),
                '患者人格': patient_data.get('患者人格', patient_data.get('人格', {})),
                '门诊对话': patient_data.get('门诊对话', []),
                '门诊病例': patient_data.get('门诊病例', ''),
                '检查检验主诊断': patient_data.get('检查检验主诊断', '')
            }
            
            return formatted_data
        except Exception as e:
            logger.error(f"加载患者数据失败: {str(e)}")
            return None
    
    def filter_high_quality_dialogues(self, dialogues: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """Keep only dialogue turns that look informative.

        A turn survives when both sides said something, the patient reply is
        more than a bare acknowledgement, the doctor asked a real question
        (contains a question marker or is reasonably long), and the patient
        reply carries content (long enough, or mentions a symptom character).

        Args:
            dialogues: list of {'医生': ..., '患者': ...} turns.

        Returns:
            The filtered list, preserving the original order.
        """
        question_markers = ['什么', '怎么', '哪里', '为什么', '如何', '多少', '多久', '症状', '感觉']
        symptom_chars = ['痛', '胀', '酸', '麻', '热', '冷', '难']
        acknowledgements = ['嗯', '哦', '好', '是', '对']

        kept = []
        for turn in dialogues:
            doctor = turn.get('医生', '').strip()
            patient = turn.get('患者', '').strip()

            # Drop empty turns and bare one/two-character acknowledgements.
            if not doctor or not patient:
                continue
            if len(patient) <= 2 and patient in acknowledgements:
                continue

            asks_question = any(marker in doctor for marker in question_markers)
            informative = len(patient) > 5 or any(ch in patient for ch in symptom_chars)

            if (asks_question or len(doctor) > 10) and informative:
                kept.append(turn)

        return kept
    
    def prepare_multi_turn_data(self, patient_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        准备多轮对话实验数据
        
        Args:
            patient_data: 患者数据
        
        Returns:
            多轮对话实验数据集
        """
        dialogues = patient_data.get('门诊对话', [])
        
        # 检查对话数量是否足够
        required_turns = self.initial_history_turns + self.max_experiment_turns
        if len(dialogues) < required_turns:
            logger.warning(f"对话轮次不足，无法进行多轮对话实验（需要至少{required_turns}轮对话，实际只有{len(dialogues)}轮）")
            return []
        
        # 寻找连续的高质量对话片段
        selected_dialogues = []
        
        # 遍历原始对话，寻找足够长的连续对话片段
        for i in range(len(dialogues) - required_turns + 1):
            candidate = dialogues[i:i+required_turns]
            # 筛选高质量对话
            high_quality = self.filter_high_quality_dialogues(candidate)
            # 检查是否有足够的高质量对话
            if len(high_quality) >= required_turns:
                selected_dialogues = candidate
                break
        
        # 如果没有找到足够长的连续高质量对话片段，使用所有对话
        if not selected_dialogues:
            logger.warning(f"没有找到足够长的连续高质量对话片段，使用所有对话")
            selected_dialogues = dialogues[:min(len(dialogues), required_turns)]
        
        # 划分历史对话和实验对话
        initial_history = selected_dialogues[:self.initial_history_turns]
        experiment_dialogues = selected_dialogues[self.initial_history_turns:]
        
        # 构建多轮实验数据集
        multi_turn_experiments = []
        
        for i, dialogue in enumerate(experiment_dialogues):
            experiment_data = {
                'patient_info': patient_data['患者基础信息'],
                'personality': patient_data['患者人格'],
                'medical_record': patient_data['门诊病例'],
                'diagnosis': patient_data['检查检验主诊断'],
                'history_dialogues': initial_history + experiment_dialogues[:i],
                'doctor_question': dialogue['医生'],
                'ground_truth_answer': dialogue['患者'],
                'turn_index': i + 1,
                'patient_id': patient_data['患者基础信息'].get('姓名', '未知患者')
            }
            multi_turn_experiments.append(experiment_data)
        
        return multi_turn_experiments
    
    def run_multi_turn_experiment(self, patient_file_path: str) -> Dict[str, Any]:
        """
        Run the multi-turn dialogue experiment for one patient.

        Loads and normalizes the patient file, builds per-turn experiment
        records, generates an answer for each turn (real model when loaded,
        mock answer otherwise), applies memory constraints, scores the answer
        against the ground truth, and saves the aggregated result to disk.

        Args:
            patient_file_path: path to the patient data JSON file.

        Returns:
            The experiment result dict, or an empty dict when loading or data
            preparation fails.
        """
        # Load the patient data
        patient_data = self.load_patient_data(patient_file_path)
        if not patient_data:
            return {}
        
        # Use the file name (without extension) as the patient ID instead of
        # the name stored in the patient's basic info.
        patient_file_name = os.path.basename(patient_file_path)
        patient_id = os.path.splitext(patient_file_name)[0]  # strip the extension
        logger.info(f"开始为患者 {patient_id} 运行多轮对话实验")
        
        # Build the per-turn experiment records
        multi_turn_experiments = self.prepare_multi_turn_data(patient_data)
        if not multi_turn_experiments:
            logger.warning(f"为患者 {patient_id} 准备多轮对话实验数据失败")
            return {}
        
        # Prompt generator (project-local module)
        from generate_reply_prompt import GenerateReplyPrompt
        prompt_generator = GenerateReplyPrompt()
        
        # Process each turn; current_history accumulates the dialogue as the
        # experiment progresses (seeded with the first record's warm-up history).
        results = []
        current_history = multi_turn_experiments[0]['history_dialogues'].copy()
        
        for experiment in multi_turn_experiments:
            logger.info(f"处理患者 {patient_id} 的第 {experiment['turn_index']} 轮对话")
            
            # 1. Build the prompt (stored in the result for traceability)
            prompt = prompt_generator.generate_prompt(
                patient_info=experiment['patient_info'],
                doctor_question=experiment['doctor_question'],
                conversation_history=current_history,
                target_personality=experiment['personality']
            )
            
            # 2. Generate the model answer, falling back to a mock answer when
            # the model is unavailable or generation fails.
            if self.model_handler:
                try:
                    model_answer = self.model_handler.generate_patient_response(
                        context_history=current_history,
                        doctor_question=experiment['doctor_question'],
                        basic_info=experiment['patient_info'],
                        personality_info=experiment['personality'],
                        medical_record=experiment['medical_record']
                    )
                    logger.info(f"使用真实模型生成回答")
                except Exception as e:
                    logger.error(f"生成模型回答失败: {str(e)}")
                    # Fall back to the mock answer on generation failure
                    model_answer = self._generate_mock_answer(experiment)
            else:
                # Model not loaded: use the mock answer
                model_answer = self._generate_mock_answer(experiment)
            
            # 3. Apply memory constraints to the raw answer
            final_answer = prompt_generator.apply_memory_constraints(
                question=experiment['doctor_question'],
                base_answer=model_answer,
                persona=experiment['personality']
            )
            
            # 4. Score the final answer against the ground truth
            metrics = self._calculate_metrics(
                question=experiment['doctor_question'],
                reference=experiment['ground_truth_answer'],
                generated=final_answer
            )
            
            # 5. Record this turn's outcome
            result = {
                'turn_index': experiment['turn_index'],
                'prompt': prompt,
                'doctor_question': experiment['doctor_question'],
                'model_answer': final_answer,
                'ground_truth_answer': experiment['ground_truth_answer'],
                'metrics': metrics,
                'personality': experiment['personality']
            }
            results.append(result)
            
            # 6. Extend the running history with this exchange, honoring the
            # configured context window size.
            # NOTE(review): when context_window_size is neither 'full' nor an
            # int, the exchange is silently dropped from the history — confirm
            # whether that is intended.
            if self.context_window_size == 'full':
                # Unlimited context: keep the entire history
                current_history.append({
                    '医生': experiment['doctor_question'],
                    '患者': final_answer
                })
            elif isinstance(self.context_window_size, int):
                # Bounded context: append, then trim to the most recent turns
                current_history.append({
                    '医生': experiment['doctor_question'],
                    '患者': final_answer
                })
                # Keep only the most recent context_window_size exchanges
                if len(current_history) > self.context_window_size:
                    current_history = current_history[-self.context_window_size:]
        
        # Assemble the final experiment result
        experiment_result = {
            'patient_id': patient_id,
            'patient_info': patient_data['患者基础信息'],
            'personality': patient_data['患者人格'],
            'experiment_results': results,
            'total_turns': len(results)
        }
        
        # Persist the result to disk
        self._save_experiment_result(patient_id, experiment_result)
        
        logger.info(f"患者 {patient_id} 的多轮对话实验完成")
        return experiment_result
    
    def _generate_mock_answer(self, experiment: Dict[str, Any]) -> str:
        """
        生成模拟的模型回答（仅用于测试和调试）
        
        Args:
            experiment: 实验数据
        
        Returns:
            模拟的模型回答
        """
        ground_truth = experiment['ground_truth_answer']
        personality = experiment['personality']
        
        # 根据人格特征生成不同的模拟回答
        if personality.get('性格') == '急躁':
            return ground_truth
        elif personality.get('性格') == '内向':
            return "嗯..." + ground_truth
        elif personality.get('性格') == '啰嗦':
            return ground_truth + "，还有啊，我之前也有过类似的情况，不过没这次严重。"
        else:
            return ground_truth
    
    def _calculate_metrics(self, question: str, reference: str, generated: str) -> Dict[str, float]:
        """Score a generated answer against the reference answer.

        Delegates to the project's Evaluator so the metric set matches the
        single-turn experiments, and appends a simple length-ratio metric.
        On any failure a dict of zeroed metrics with the same key set is
        returned so downstream aggregation keeps working.

        Args:
            question: the doctor's question (not used by the scorer itself).
            reference: the ground-truth patient answer.
            generated: the model-produced answer.

        Returns:
            Mapping from metric name to value.
        """
        try:
            # Project-local evaluator, imported lazily.
            from evaluator import Evaluator

            scorer = Evaluator()
            all_results = scorer.calculate_all_metrics(reference, generated, {})
            metrics = all_results.get('text_metrics', {})

            # Extra lightweight metric: generated/reference length ratio
            # (guarded against an empty reference).
            metrics['length_ratio'] = float(round(len(generated) / max(len(reference), 1), 4))
            return metrics
        except Exception as e:
            logger.error(f"计算评估指标失败: {str(e)}")
            # Fall back to the full metric key set, all zeroed.
            zeroed = [
                'cosine_similarity', 'bertscore', 'bleu',
                'rouge-1', 'rouge-2', 'rouge-l',
                'rouge-1_p', 'rouge-1_r', 'rouge-2_p', 'rouge-2_r',
                'rouge-l_p', 'rouge-l_r', 'meteor', 'length_ratio',
            ]
            return {name: 0.0 for name in zeroed}

    def _save_experiment_result(self, patient_id: str, result: Dict[str, Any]):
        """
        Write one patient's experiment result to a JSON file.

        The file name embeds the model name so results from different models
        can coexist in the same directory. Failures are logged, not raised.

        Args:
            patient_id: patient identifier (used in the file name).
            result: the experiment result dict to serialize.
        """
        try:
            # Make sure the results directory exists
            os.makedirs(self.results_dir, exist_ok=True)
            
            # BUG FIX: self.config is a plain dict (see __init__), so the
            # original attribute access `self.config.MODEL_PATH` raised
            # AttributeError on every call and the result was never saved.
            # Read the model path from the config dict, falling back to the
            # project Config that __init__ used to load the model.
            model_path = self.config.get('model_path', '')
            if not model_path:
                from config import Config
                model_path = Config.MODEL_PATH
            model_name = self._get_model_name_from_path(model_path)
            
            # The result file name includes the model name
            result_file = os.path.join(self.results_dir, f"multi_turn_result_{model_name}_{patient_id}.json")
            
            def process_floats(obj):
                """Recursively make the result JSON-serializable."""
                # torch float32 scalars expose .dtype; coerce them to plain floats.
                if hasattr(obj, 'dtype') and str(obj.dtype) == 'float32':
                    return float(obj)
                if isinstance(obj, float):
                    # Collapse numerical noise near zero.
                    if abs(obj) < 1e-10:
                        return 0.0
                    # Emit whole-number floats (e.g. 1.0) as ints for compactness.
                    if obj.is_integer():
                        return int(obj)
                    return obj
                elif isinstance(obj, dict):
                    return {k: process_floats(v) for k, v in obj.items()}
                elif isinstance(obj, list):
                    return [process_floats(item) for item in obj]
                else:
                    return obj
            
            # Normalize every value in the result tree
            processed_result = process_floats(result)
            
            with open(result_file, 'w', encoding='utf-8') as f:
                json.dump(processed_result, f, ensure_ascii=False, indent=2, allow_nan=False)
            
            logger.info(f"实验结果已保存到: {result_file}")
        except Exception as e:
            logger.error(f"保存实验结果失败: {str(e)}")

    def _get_model_name_from_path(self, model_path: str) -> str:
        """
        从模型路径中提取模型名称
        
        Args:
            model_path: 模型路径
        
        Returns:
            模型名称
        """
        # 从路径中提取最后一部分作为模型名称
        model_name = os.path.basename(model_path)
        
        # 如果路径以'/'结尾，basename会返回空字符串，需要特殊处理
        if not model_name:
            model_name = os.path.basename(os.path.dirname(model_path))
        
        # 移除可能的文件扩展名
        model_name = os.path.splitext(model_name)[0]
        
        return model_name
        
    def analyze_results(self, results_dir: str):
        """
        Analyze multi-turn experiment results: compute average metrics over
        all turns, group them by personality, and emit charts plus a summary.

        Args:
            results_dir: directory containing the multi_turn_result_*.json
                files written by _save_experiment_result.
        """
        try:
            # Collect all experiment result files
            result_files = [f for f in os.listdir(results_dir) if f.startswith('multi_turn_result_') and f.endswith('.json')]
            
            if not result_files:
                logger.warning(f"在目录 {results_dir} 中未找到多轮实验结果文件")
                return
            
            # Accumulators for the overall (all turns, all patients) metrics
            total_metrics = {
                'cosine_similarity': 0.0,
                'bertscore': 0.0,
                'meteor': 0.0,
                'rouge-1': 0.0,
                'rouge-2': 0.0,
                'rouge-l': 0.0,
                'rouge-1_p': 0.0,
                'rouge-1_r': 0.0,
                'rouge-2_p': 0.0,
                'rouge-2_r': 0.0,
                'rouge-l_p': 0.0,
                'rouge-l_r': 0.0,
                'length_ratio': 0.0
            }
            # BLEU sub-metrics are nested under 'bleu' in each turn's metrics
            # and therefore accumulated separately.
            total_bleu_metrics = {
                'bleu-1': 0.0,
                'bleu-2': 0.0,
                'bleu-3': 0.0,
                'bleu-4': 0.0
            }
            
            # Per-personality accumulators (keyed by str(personality))
            metrics_by_personality = {}
            bleu_metrics_by_personality = {}
            
            total_count = 0
            
            # Walk every result file
            for result_file in result_files:
                file_path = os.path.join(results_dir, result_file)
                with open(file_path, 'r', encoding='utf-8') as f:
                    result = json.load(f)
                
                patient_id = result.get('patient_id', 'unknown')  # NOTE: read but currently unused below
                personality = result.get('personality', {})
                experiment_results = result.get('experiment_results', [])
                
                # Group by the string form of the personality dict
                personality_key = str(personality)
                
                # Lazily initialize this personality's accumulators
                if personality_key not in metrics_by_personality:
                    metrics_by_personality[personality_key] = {k: 0.0 for k in total_metrics}
                    metrics_by_personality[personality_key]['count'] = 0
                
                if personality_key not in bleu_metrics_by_personality:
                    bleu_metrics_by_personality[personality_key] = {k: 0.0 for k in total_bleu_metrics}
                    bleu_metrics_by_personality[personality_key]['count'] = 0
                
                # Accumulate per-turn metrics
                for exp_result in experiment_results:
                    metrics = exp_result.get('metrics', {})
                    
                    # Flat (non-nested) metrics
                    for key in total_metrics:
                        if key in metrics:
                            total_metrics[key] += metrics[key]
                            metrics_by_personality[personality_key][key] += metrics[key]
                    
                    # Nested BLEU metrics
                    if 'bleu' in metrics and isinstance(metrics['bleu'], dict):
                        for key in total_bleu_metrics:
                            if key in metrics['bleu']:
                                total_bleu_metrics[key] += metrics['bleu'][key]
                                bleu_metrics_by_personality[personality_key][key] += metrics['bleu'][key]
                    
                    total_count += 1
                    metrics_by_personality[personality_key]['count'] += 1
                    bleu_metrics_by_personality[personality_key]['count'] += 1
            
            # Overall averages (guarded against an empty result set)
            avg_metrics = {k: v / total_count if total_count > 0 else 0.0 for k, v in total_metrics.items()}
            avg_bleu_metrics = {k: v / total_count if total_count > 0 else 0.0 for k, v in total_bleu_metrics.items()}
            
            # Re-nest the averaged BLEU metrics under 'bleu'
            avg_metrics['bleu'] = avg_bleu_metrics
            
            # Per-personality averages (the helper 'count' key is removed afterwards)
            for personality_key in metrics_by_personality:
                count = metrics_by_personality[personality_key]['count']
                if count > 0:
                    for key in total_metrics:
                        metrics_by_personality[personality_key][key] /= count
                    del metrics_by_personality[personality_key]['count']
            
            for personality_key in bleu_metrics_by_personality:
                count = bleu_metrics_by_personality[personality_key]['count']
                if count > 0:
                    for key in total_bleu_metrics:
                        bleu_metrics_by_personality[personality_key][key] /= count
                    del bleu_metrics_by_personality[personality_key]['count']
            
            # Log the averaged metrics
            logger.info("多轮对话实验平均指标:")
            for key, value in avg_metrics.items():
                if key == 'bleu':
                    logger.info(f"  {key}:")
                    for bleu_key, bleu_value in value.items():
                        logger.info(f"    {bleu_key}: {bleu_value:.4f}")
                else:
                    logger.info(f"  {key}: {value:.4f}")
            
            # Emit the visualization charts
            self._generate_visualizations(avg_metrics, metrics_by_personality, bleu_metrics_by_personality, results_dir)
            
            # Emit the experiment summary.
            # NOTE(review): _generate_experiment_summary is currently defined
            # inside the `if __name__ == "__main__"` block at the bottom of
            # this file, not on the class, so this call raises AttributeError
            # which is swallowed by the except below. Move that def into the
            # class body to make this step work.
            self._generate_experiment_summary(results_dir, avg_metrics, metrics_by_personality, bleu_metrics_by_personality)
            
            logger.info("多轮对话实验结果分析完成")
        except Exception as e:
            logger.error(f"分析实验结果失败: {str(e)}")

    def _generate_visualizations(self, avg_metrics: Dict[str, Any], metrics_by_personality: Dict[str, Dict], bleu_metrics_by_personality: Dict[str, Dict], results_dir: str):
        """
        Generate visualization charts for the experiment metrics.

        Produces an overall average-metric bar chart, grouped bar charts
        comparing regular and BLEU metrics across personalities, and a
        personality-mapping JSON file. All outputs go into results_dir.

        Args:
            avg_metrics: average metrics (may contain a nested 'bleu' dict).
            metrics_by_personality: per-personality averaged regular metrics.
            bleu_metrics_by_personality: per-personality averaged BLEU metrics.
            results_dir: output directory for the chart files.
        """
        try:
            # Make sure CJK labels and the minus sign render correctly.
            plt.rcParams['font.sans-serif'] = ['SimHei']
            plt.rcParams['axes.unicode_minus'] = False
            
            # 1. Overall average-metric bar chart.
            # Flatten the nested BLEU metrics into a single display dict.
            display_metrics = {}
            
            regular_metrics = ['cosine_similarity', 'bertscore', 'meteor', 'rouge-1', 'rouge-2', 'rouge-l', 'length_ratio']
            for metric_name in regular_metrics:
                if metric_name in avg_metrics:
                    display_metrics[metric_name] = avg_metrics[metric_name]
            
            if 'bleu' in avg_metrics and isinstance(avg_metrics['bleu'], dict):
                for key, value in avg_metrics['bleu'].items():
                    display_metrics[key] = value
            
            # Sort by value (descending) so the chart reads left-to-right.
            sorted_metrics = sorted(display_metrics.items(), key=lambda x: x[1], reverse=True)
            
            plt.figure(figsize=(15, 8))
            bars = plt.bar(range(len(sorted_metrics)), [v for _, v in sorted_metrics], color='skyblue')
            
            plt.title('多轮对话实验平均指标', fontsize=16)
            plt.xlabel('评估指标', fontsize=14)
            plt.ylabel('指标值', fontsize=14)
            plt.xticks(range(len(sorted_metrics)), [k.replace('_', '-') for k, _ in sorted_metrics], rotation=45, ha='right')
            plt.tight_layout()
            
            # Annotate each bar with its value.
            for bar, value in zip(bars, [v for _, v in sorted_metrics]):
                height = bar.get_height()
                plt.text(bar.get_x() + bar.get_width()/2., height, f'{value:.4f}', ha='center', va='bottom')
            
            metrics_fig_path = os.path.join(results_dir, 'multi_turn_metrics_summary.png')
            plt.savefig(metrics_fig_path, dpi=300)
            plt.close()
            
            logger.info(f"总体指标可视化已保存到: {metrics_fig_path}")
            
            # 2. Grouped bar charts comparing personalities.
            personality_metrics_to_plot = ['cosine_similarity', 'bertscore', 'meteor', 'rouge-1', 'rouge-2', 'rouge-l']
            bleu_metrics_to_plot = ['bleu-1', 'bleu-2', 'bleu-3', 'bleu-4']
            
            personality_types = list(metrics_by_personality.keys())
            total_personalities = len(personality_types)
            
            if total_personalities > 0:
                # BUG FIX: the original code referenced the comprehension
                # variable `j` after its list comprehension ended (a NameError
                # in Python 3, which aborted all chart generation into the
                # broad except below) and attached a single label to a whole
                # group of bars. Draw one correctly-labelled bar series per
                # personality instead.
                width = 0.8 / total_personalities
                
                personality_fig_path = os.path.join(results_dir, 'multi_turn_metrics_by_personality.png')
                self._plot_personality_comparison(
                    metrics_by_personality, personality_types, personality_metrics_to_plot,
                    width, '不同人格特征的评估指标对比', '评估指标', personality_fig_path)
                logger.info(f"按人格特征分组的指标可视化已保存到: {personality_fig_path}")
                
                if bleu_metrics_by_personality:
                    bleu_fig_path = os.path.join(results_dir, 'multi_turn_bleu_metrics_by_personality.png')
                    self._plot_personality_comparison(
                        bleu_metrics_by_personality, personality_types, bleu_metrics_to_plot,
                        width, '不同人格特征的BLEU指标对比', 'BLEU指标', bleu_fig_path)
                    logger.info(f"按人格特征分组的BLEU指标可视化已保存到: {bleu_fig_path}")
            
            # Persist the personality-feature mapping alongside the charts.
            personality_mapping_file = os.path.join(results_dir, 'personality_mapping.json')
            # self.personality_features is expected to hold the feature
            # definitions; fall back to an empty list when the attribute is
            # absent (it is not set in __init__).
            personality_mapping = {
                'features': getattr(self, 'personality_features', []),
                'description': '人格特征映射关系'
            }
            with open(personality_mapping_file, 'w', encoding='utf-8') as f:
                json.dump(personality_mapping, f, ensure_ascii=False, indent=2)
            
            logger.info(f"人格特征映射关系已保存到: {personality_mapping_file}")
        except Exception as e:
            logger.error(f"生成可视化图表失败: {str(e)}")
    
    def _plot_personality_comparison(self, metrics_by_group: Dict[str, Dict], group_names: List[str], metric_names: List[str], width: float, title: str, xlabel: str, out_path: str):
        """
        Draw one grouped bar chart (one bar series per personality) and save it.

        Args:
            metrics_by_group: personality key -> {metric name -> value}.
            group_names: ordered list of personality keys to plot.
            metric_names: metrics shown along the x axis.
            width: width of one bar (0.8 / number of personalities).
            title: chart title.
            xlabel: x-axis label.
            out_path: destination PNG path.
        """
        fig, ax = plt.subplots(figsize=(16, 10))
        
        for j, group in enumerate(group_names):
            # Missing metrics default to 0 so every series has the same length.
            values = [metrics_by_group.get(group, {}).get(m, 0) for m in metric_names]
            x_pos = [i + j * width for i in range(len(metric_names))]
            ax.bar(x_pos, values, width=width, label=group, alpha=0.7)
        
        ax.set_title(title, fontsize=16)
        ax.set_xlabel(xlabel, fontsize=14)
        ax.set_ylabel('指标值', fontsize=14)
        ax.set_xticks([i + 0.4 for i in range(len(metric_names))])
        ax.set_xticklabels([m.replace('_', '-') for m in metric_names])
        
        if group_names:
            ax.legend(title='人格特征')
        
        plt.tight_layout()
        plt.savefig(out_path, dpi=300)
        plt.close(fig)

# Usage example
if __name__ == "__main__":
    # Sample configuration
    config = {
        'initial_history_turns': 2,
        'max_experiment_turns': 5,
        'context_window_size': 'full',
        'results_dir': '/mnt/ssd/jsj/patient/results/multi_turn'
    }
    
    # Create the multi-turn experiment manager
    multi_turn_manager = MultiTurnExperimentManager(config)
    
    # Run an example experiment
    sample_patient_file = '/mnt/ssd/jsj/patient/dataset/10-赵瑞/10-赵瑞.json'
    
    if os.path.exists(sample_patient_file):
        logger.info(f"开始运行多轮对话实验，使用患者数据: {sample_patient_file}")
        result = multi_turn_manager.run_multi_turn_experiment(sample_patient_file)
        logger.info(f"多轮对话实验完成，结果: {result}")
    else:
        logger.error(f"示例患者数据文件不存在: {sample_patient_file}")
        logger.info("请提供有效的患者数据文件路径")
    # NOTE(review): the `def _generate_experiment_summary(self, ...)` that
    # follows this block is indented inside this `if __name__ == "__main__"`
    # guard, so it is only a local function here and never becomes a method of
    # MultiTurnExperimentManager. analyze_results' call to
    # self._generate_experiment_summary therefore raises AttributeError
    # (swallowed by its broad except). That def should be moved into the
    # class body.

    def _generate_experiment_summary(self, results_dir: str, avg_metrics: Dict[str, Any], metrics_by_personality: Dict[str, Dict], bleu_metrics_by_personality: Dict[str, Dict]):
        """
        Generate and save an experiment summary containing all metrics.

        Reads every per-patient result file (``multi_turn_result_*.json``) in
        *results_dir*, averages each patient's per-turn metrics, then averages
        those per-patient means across all patients, and writes the combined
        summary to ``experiment_summary_<model_name>.json`` in the same
        directory. Best-effort: any failure is logged, never raised.

        Args:
            results_dir: Directory holding the per-patient result files; the
                summary file is written here as well.
            avg_metrics: Average evaluation metrics (not used by this
                implementation; kept for interface compatibility).
            metrics_by_personality: Metrics grouped by personality trait (not
                used by this implementation; kept for interface compatibility).
            bleu_metrics_by_personality: BLEU metrics grouped by personality
                trait (not used by this implementation; kept for interface
                compatibility).
        """
        # Scalar (non-nested) metric keys expected in each turn's metrics dict,
        # plus the keys of the nested 'bleu' sub-dict.
        scalar_keys = (
            'cosine_similarity', 'bertscore', 'meteor',
            'rouge-1', 'rouge-2', 'rouge-l',
            'rouge-1_p', 'rouge-1_r', 'rouge-2_p', 'rouge-2_r',
            'rouge-l_p', 'rouge-l_r', 'length_ratio',
        )
        bleu_keys = ('bleu-1', 'bleu-2', 'bleu-3', 'bleu-4')

        try:
            # Collect every per-patient result file produced by the experiment.
            result_files = [f for f in os.listdir(results_dir)
                            if f.startswith('multi_turn_result_') and f.endswith('.json')]

            experiment_summary = {
                'total_patients': len(result_files),
                'experiment_name': 'multi_turn_dialogue_experiment',
                'config_summary': {
                    'initial_history_turns': self.initial_history_turns,
                    'max_experiment_turns': self.max_experiment_turns,
                    'context_window_size': self.context_window_size
                },
                'results_summary': []
            }

            # Accumulators for the across-patient averages.
            total_metrics = dict.fromkeys(scalar_keys, 0.0)
            total_bleu = dict.fromkeys(bleu_keys, 0.0)

            for result_file in result_files:
                with open(os.path.join(results_dir, result_file), 'r', encoding='utf-8') as f:
                    result = json.load(f)

                patient_id = result.get('patient_id', 'unknown')
                personality = result.get('personality', {})
                total_turns = result.get('total_turns', 0)

                # Per-patient accumulators: sum over all turns first.
                patient_metrics = dict.fromkeys(scalar_keys, 0.0)
                patient_bleu = dict.fromkeys(bleu_keys, 0.0)

                for exp_result in result.get('experiment_results', []):
                    metrics = exp_result.get('metrics', {})
                    # Accumulate the flat scalar metrics.
                    for key in patient_metrics:
                        if key in metrics:
                            patient_metrics[key] += metrics[key]
                    # BLEU scores are nested one level deeper.
                    if isinstance(metrics.get('bleu'), dict):
                        for key in patient_bleu:
                            if key in metrics['bleu']:
                                patient_bleu[key] += metrics['bleu'][key]

                # Turn the sums into per-turn means.
                # NOTE(review): this divides by the reported 'total_turns'
                # rather than len(experiment_results); if the two ever
                # disagree the averages are skewed — kept for compatibility.
                if total_turns > 0:
                    for key in patient_metrics:
                        patient_metrics[key] = round(patient_metrics[key] / total_turns, 4)
                    for key in patient_bleu:
                        patient_bleu[key] = round(patient_bleu[key] / total_turns, 4)

                # Merge the BLEU block back into the main metrics dict.
                patient_metrics['bleu'] = patient_bleu

                # Fold this patient's means into the across-patient totals.
                for key in total_metrics:
                    total_metrics[key] += patient_metrics[key]
                for key in total_bleu:
                    total_bleu[key] += patient_bleu[key]

                experiment_summary['results_summary'].append({
                    'patient_id': patient_id,
                    'personality': personality,
                    'total_turns': total_turns,
                    'metrics_summary': patient_metrics
                })

            # Across-patient averages (only when at least one patient exists).
            total_patients = len(experiment_summary['results_summary'])
            if total_patients > 0:
                overall_metrics = {key: round(value / total_patients, 4)
                                   for key, value in total_metrics.items()}
                overall_metrics['bleu'] = {key: round(value / total_patients, 4)
                                           for key, value in total_bleu.items()}
                experiment_summary['overall_metrics_average'] = overall_metrics

            # BUG FIX: self.config is a plain dict (see __init__), so the old
            # attribute access `self.config.MODEL_PATH` raised AttributeError,
            # which the broad `except` below silently swallowed — the summary
            # file was never written. Use dict lookups with a lowercase-key
            # fallback and a safe default instead.
            model_path = self.config.get('MODEL_PATH',
                                         self.config.get('model_path', 'unknown_model'))
            model_name = self._get_model_name_from_path(model_path)

            # File name embeds the model name so runs of different models
            # don't overwrite each other's summaries.
            summary_file = os.path.join(results_dir, f'experiment_summary_{model_name}.json')

            def _normalize_floats(obj):
                # Recursively snap near-zero floats to 0.0 and round every
                # other float to 10 decimal places. For abs(x) >= 1e-10 this is
                # exactly what the original string-format round-trip computed
                # (both of its branches reduce to float("{:.10f}".format(x))).
                if isinstance(obj, float):
                    return 0.0 if abs(obj) < 1e-10 else float('{0:.10f}'.format(obj))
                if isinstance(obj, dict):
                    return {k: _normalize_floats(v) for k, v in obj.items()}
                if isinstance(obj, list):
                    return [_normalize_floats(item) for item in obj]
                return obj

            processed_summary = _normalize_floats(experiment_summary)

            with open(summary_file, 'w', encoding='utf-8') as f:
                # allow_nan=False makes stray NaN/Inf fail loudly here instead
                # of silently emitting invalid JSON.
                json.dump(processed_summary, f, ensure_ascii=False, indent=2, allow_nan=False)

            logger.info(f"实验摘要已保存到: {summary_file}")
        except Exception as e:
            # Best-effort: summary generation must never crash the experiment.
            logger.error(f"生成实验摘要失败: {str(e)}")