# -*- coding: utf-8 -*-

import os
import time
import logging
import json
import pickle
import hashlib
from typing import Optional, List, Dict, Any, Union
from datetime import datetime

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Import project-local modules (after logging is configured)
from data_processor import DataProcessor
from model_handler import ModelHandler
from evaluator import get_evaluator_instance
from result_manager import ResultManager


class ExperimentManager:
    """
    Experiment manager: runs each configured experiment group and collects results.

    Responsibilities:
      - load the model and the patient data,
      - prepare (and disk-cache) the per-group experiment data,
      - run batched generation with checkpoint/resume support,
      - persist per-batch results through ResultManager.
    """

    def __init__(self, config):
        """
        Initialize the experiment manager.

        Args:
            config: experiment configuration object. Attributes such as
                MODEL_PATH, DATA_DIR, OUTPUT_DIR, BATCH_SIZE, MAX_SAMPLES,
                GROUP_MAX_SAMPLES and USE_MODULAR_APPROACH are read via
                getattr/hasattr, so most of them are optional.
        """
        self.config = config
        self.data_processor = DataProcessor(config)
        # ModelHandler picks a GPU and sets CUDA_VISIBLE_DEVICES on construction.
        self.model_handler = ModelHandler(config)

        # Use the shared Evaluator instance to avoid loading its models twice.
        self.evaluator = get_evaluator_instance(config)

        # Result persistence helper.
        self.result_manager = ResultManager(config)

        # Model and tokenizer are loaded later, in run_experiments().
        self.tokenizer = None
        self.model = None

        # Cache directory for prepared experiment data.
        self.experiment_data_cache_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/experiment_data')
        os.makedirs(self.experiment_data_cache_dir, exist_ok=True)
        logger.info(f"实验数据缓存目录已创建: {self.experiment_data_cache_dir}")

        # Directory for checkpoint (resume) information.
        self.checkpoint_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/checkpoints')
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        logger.info(f"断点信息存储目录已创建: {self.checkpoint_dir}")


    def run_experiments(self, context_windows=None, num_samples=None) -> List[Dict[str, Any]]:
        """
        Execute all configured experiment groups.

        Args:
            context_windows: list of context-window sizes (ints plus the
                special values "full" and "semantic"). Defaults to the
                EXPERIMENT_CONFIG["context_windows"] entry.
            num_samples: number of test samples; overrides config.MAX_SAMPLES
                when given.

        Returns:
            List of per-group result dicts (may be empty if model or data
            loading fails).
        """
        all_results = []

        # Resolve the experiment groups to run.
        if context_windows is None:
            context_windows = self.config.EXPERIMENT_CONFIG.get("context_windows", [2, 4, 6, "full", "semantic"])

        # Sample count: an explicit num_samples wins over config.MAX_SAMPLES.
        sample_count = num_samples if num_samples is not None else getattr(self.config, 'MAX_SAMPLES', None)

        # Load the model only after GPU selection has completed (see __init__).
        try:
            self.model_handler.load_model(self.config.MODEL_PATH)
        except Exception as e:
            logger.exception(f"模型加载失败: {str(e)}")
            return all_results

        # Load the patient data.
        patient_data_list = self.data_processor.load_patient_data(self.config.DATA_DIR, sample_count)
        if not patient_data_list:
            logger.error("未能加载患者数据")
            return all_results

        logger.info(f"成功加载 {len(patient_data_list)} 条患者数据")


        # Build the semantic-search function only when the "semantic" group is requested.
        semantic_search_func = None
        if "semantic" in context_windows:
            semantic_search_func = self.data_processor.get_semantic_search_function()
            if not semantic_search_func:
                logger.warning("语义检索功能不可用，将跳过semantic实验组")
                context_windows = [window for window in context_windows if window != "semantic"]

        # Run each experiment group; a failing group does not abort the others.
        for window_size in context_windows:
            logger.info(f"开始执行实验组：窗口大小 = {window_size}")
            start_time = time.time()

            try:
                # Prepare (or load cached) experiment data for this group.
                experiment_data = self.prepare_experiment_data(
                    patient_data_list,
                    window_size,
                    semantic_search_func
                )

                if not experiment_data:
                    logger.warning(f"实验组 {window_size} 没有可用的实验数据")
                    continue

                logger.info(f"实验组 {window_size} 准备了 {len(experiment_data)} 条实验数据")
                logger.info(f"实验数据总量已达到: {len(experiment_data)} 条")

                logger.info("使用串行处理模式")
                batch_results = self._process_batch(
                    experiment_data,
                    window_size
                )

                # Aggregate per-group results for the return value.
                experiment_results = {
                    "experiment_id": f"context_window_{window_size}",
                    "context_window": window_size,
                    "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "total_samples": len(batch_results),
                    "results": batch_results,
                    "execution_time": round(time.time() - start_time, 2)
                }

                all_results.append(experiment_results)
                logger.info(f"实验组 {window_size} 执行完成，耗时: {experiment_results['execution_time']} 秒")

            except Exception as e:
                logger.exception(f"实验组 {window_size} 执行失败: {str(e)}")

        return all_results


    def _get_experiment_data_cache_key(self, window_size: Union[int, str], sample_count: int) -> str:
        """
        Build the unique cache key for one experiment-data set.

        Args:
            window_size: context-window size (int or "full"/"semantic").
            sample_count: number of patient samples.

        Returns:
            Hex-digest cache key string.
        """
        # Include every factor that influences the prepared data, so a change
        # in any of them produces a different cache file.
        base_str = f"{self.config.DATA_DIR}_{window_size}_{sample_count}_{self.config.MODEL_PATH}_{str(getattr(self.config, 'MIN_DIALOGUE_LENGTH', 6))}"

        # md5 is fine here: the hash is a cache key, not a security boundary.
        hash_obj = hashlib.md5(base_str.encode())
        cache_key = hash_obj.hexdigest()
        logger.debug(f"生成缓存键: {cache_key} 用于窗口大小: {window_size}, 样本数: {sample_count}")
        return cache_key

    def _save_experiment_data_to_cache(self, experiment_data: List[Dict[str, Any]], cache_key: str) -> None:
        """
        Persist prepared experiment data to a pickle cache file.

        Args:
            experiment_data: list of experiment-data dicts.
            cache_key: key from _get_experiment_data_cache_key().
        """
        cache_file = os.path.join(self.experiment_data_cache_dir, f"{cache_key}.pkl")

        try:
            with open(cache_file, 'wb') as f:
                pickle.dump(experiment_data, f)
            logger.info(f"成功将 {len(experiment_data)} 条实验数据保存到缓存: {cache_file}")
        except Exception as e:
            # Best-effort: a failed cache write must not abort the experiment.
            logger.exception(f"保存实验数据到缓存失败: {str(e)}")

    def _load_experiment_data_from_cache(self, cache_key: str) -> Optional[List[Dict[str, Any]]]:
        """
        Load prepared experiment data from the pickle cache.

        Args:
            cache_key: key from _get_experiment_data_cache_key().

        Returns:
            The cached list (possibly empty), or None when the cache file is
            missing or unreadable.
        """
        cache_file = os.path.join(self.experiment_data_cache_dir, f"{cache_key}.pkl")

        if not os.path.exists(cache_file):
            return None

        try:
            # NOTE(review): pickle.load is only safe because this directory is
            # written exclusively by _save_experiment_data_to_cache — never
            # point it at untrusted files.
            with open(cache_file, 'rb') as f:
                experiment_data = pickle.load(f)
            logger.info(f"成功从缓存加载 {len(experiment_data)} 条实验数据: {cache_file}")
            return experiment_data
        except Exception as e:
            logger.exception(f"从缓存加载实验数据失败: {str(e)}")
            return None

    def prepare_experiment_data(
        self,
        patient_data_list: List[Dict[str, Any]],
        window_size: Union[int, str],
        semantic_search_func=None
    ) -> List[Dict[str, Any]]:
        """
        Prepare the data for one experiment group, preferring the disk cache.

        Args:
            patient_data_list: loaded patient records.
            window_size: context-window size for this group.
            semantic_search_func: optional retrieval function for the
                "semantic" group.

        Returns:
            List of per-turn experiment-data dicts (possibly truncated by
            config.GROUP_MAX_SAMPLES).
        """
        # Derive the cache key from the inputs that shape the data.
        sample_count = len(patient_data_list)
        cache_key = self._get_experiment_data_cache_key(window_size, sample_count)

        # Try the cache first. Compare against None explicitly: a cached empty
        # list is a valid hit and must not trigger re-preparation every run.
        cached_data = self._load_experiment_data_from_cache(cache_key)
        if cached_data is not None:
            logger.info(f"成功从缓存加载 {len(cached_data)} 条实验数据，避免重复准备")
            return cached_data

        # Cache miss: build the data from scratch.
        experiment_data = []

        for patient_data in patient_data_list:
            # Expand each patient record into per-turn context samples.
            patient_context_data = self.data_processor.prepare_context_data(
                patient_data,
                window_size,
                semantic_search_func
            )
            experiment_data.extend(patient_context_data)

        # Apply an optional per-group sample cap.
        if hasattr(self.config, 'GROUP_MAX_SAMPLES'):
            group_max_samples = self.config.GROUP_MAX_SAMPLES.get(window_size)
            if group_max_samples is not None and len(experiment_data) > group_max_samples:
                logger.info(f"对实验组 {window_size} 应用最大样例数限制: {group_max_samples}")
                experiment_data = experiment_data[:group_max_samples]

        # Persist for the next run.
        self._save_experiment_data_to_cache(experiment_data, cache_key)

        return experiment_data


    def get_model_name_from_path(self, model_path: str) -> str:
        """
        Extract a model name from a model path.

        Args:
            model_path: filesystem path of the model.

        Returns:
            The path's basename, or "default_model" when the basename is
            empty or shorter than two characters.
        """
        model_name = os.path.basename(model_path)
        # Guard against trailing slashes / degenerate paths.
        if not model_name or len(model_name) < 2:
            model_name = "default_model"
        return model_name

    def _process_batch(
        self,
        experiment_data: List[Dict[str, Any]],
        window_size: Union[int, str]
    ) -> List[Dict[str, Any]]:
        """
        Process experiment data in batches, with checkpoint/resume support.

        Args:
            experiment_data: experiment-data list for this group.
            window_size: context-window size of this group.

        Returns:
            List of per-sample result dicts.
        """
        results = []
        total_samples = len(experiment_data)

        # Batch size comes from the configuration (default 8).
        batch_size = self._get_batch_size()

        # Resume from a previous checkpoint if one matches.
        checkpoint_data = self._load_checkpoint(window_size)
        start_index = checkpoint_data.get("processed_count", 0)

        if start_index > 0:
            logger.info(f"从断点处继续处理，跳过前 {start_index} 个样本")
            # Skip the already-processed prefix.
            experiment_data = experiment_data[start_index:]
            remaining_samples = len(experiment_data)
            if remaining_samples <= 0:
                logger.info("所有样本已处理完毕，无需继续")
                # Nothing left: drop the stale checkpoint file.
                checkpoint_file = self._get_checkpoint_file(window_size)
                if os.path.exists(checkpoint_file):
                    os.remove(checkpoint_file)
                    logger.info(f"已删除断点文件: {checkpoint_file}")
                return []

            # Recompute the batch count for the remaining samples.
            total_batches = (remaining_samples + batch_size - 1) // batch_size
            logger.info(f"继续处理实验组 {window_size}，总批次数: {total_batches}，每批次大小: {batch_size}")
        else:
            total_batches = (total_samples + batch_size - 1) // batch_size
            logger.info(f"开始处理实验组 {window_size}，总批次数: {total_batches}，每批次大小: {batch_size}")

        # ETA bookkeeping.
        total_processing_time = 0
        processed_batches = 0

        # Experiment metadata stored alongside the results.
        experiment_info = self._prepare_experiment_info(window_size)

        # Output file path derived from the model name.
        output_file = self._prepare_output_file(window_size)

        # Hoisted out of the loop: format seconds into a human-friendly string.
        def format_time(seconds):
            if seconds < 60:
                return f"{seconds:.1f}秒"
            elif seconds < 3600:
                minutes, seconds = divmod(seconds, 60)
                return f"{int(minutes)}分{int(seconds)}秒"
            else:
                hours, remainder = divmod(seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                return f"{int(hours)}时{int(minutes)}分{int(seconds)}秒"

        # Process the data batch by batch.
        for i in range(0, len(experiment_data), batch_size):
            batch = experiment_data[i:i+batch_size]
            current_batch = i//batch_size + 1

            # Wall-clock start of this batch.
            batch_start_time = time.time()

            # Batched response generation.
            generated_responses = self.model_handler.batch_generate_responses(
                batch
            )

            # Score and package the batch results.
            batch_results = self._process_batch_results(
                batch,
                generated_responses,
                batch_start_time,
                window_size
            )

            # Accumulate into the overall result list.
            results.extend(batch_results)

            # Persist the whole batch at once.
            self._save_batch_results(
                batch_results,
                output_file,
                experiment_info
            )

            # Update timing statistics.
            batch_time = time.time() - batch_start_time
            total_processing_time += batch_time
            processed_batches += 1

            # Progress report.
            processed = min(i + batch_size, len(experiment_data))
            print(f"处理进度: {processed}/{len(experiment_data)}")

            # Checkpoint counts are absolute (include the skipped prefix).
            self._save_checkpoint(window_size, start_index + processed, total_samples)

            # ETA (processed_batches >= 1 is guaranteed here).
            avg_batch_time = total_processing_time / processed_batches
            remaining_batches = total_batches - processed_batches
            estimated_remaining_time = remaining_batches * avg_batch_time

            print(f"  当前批次耗时: {format_time(batch_time)}")
            print(f"  平均批次耗时: {format_time(avg_batch_time)}")
            print(f"  预计剩余时间: {format_time(estimated_remaining_time)}")

        # Everything processed: remove the checkpoint file.
        checkpoint_file = self._get_checkpoint_file(window_size)
        if os.path.exists(checkpoint_file):
            os.remove(checkpoint_file)
            logger.info(f"已删除断点文件: {checkpoint_file}")

        return results

    def _get_batch_size(self) -> int:
        """
        Return the batch size from the configuration.

        Returns:
            config.BATCH_SIZE when set and truthy, otherwise 8.
        """
        batch_size = getattr(self.config, 'BATCH_SIZE', None)
        return batch_size if batch_size else 8  # default batch size

    def _prepare_experiment_info(self, window_size: Union[int, str]) -> Dict[str, Any]:
        """
        Build the experiment metadata dict stored with the results.

        Args:
            window_size: context-window size.

        Returns:
            Metadata dict (id, window, timestamp, model path/name).
        """
        return {
            "experiment_id": f"context_window_{window_size}",
            "context_window": window_size,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "model_path": self.config.MODEL_PATH,
            "model_name": self.get_model_name_from_path(self.config.MODEL_PATH)
        }

    def _prepare_output_file(self, window_size: Union[int, str]) -> str:
        """
        Build the output-file path for this experiment group.

        Args:
            window_size: context-window size.

        Returns:
            Absolute path of the JSON results file.
        """
        model_name = self.get_model_name_from_path(self.config.MODEL_PATH)

        # The modular approach writes to a dedicated directory
        # (config flag USE_MODULAR_APPROACH).
        is_modular_method = hasattr(self.config, 'USE_MODULAR_APPROACH') and self.config.USE_MODULAR_APPROACH

        if is_modular_method:
            # NOTE(review): hard-coded absolute path — consider moving this
            # into the configuration.
            #output_dir = "/mnt/ssd/jsj/patient/results/method"
            output_dir = "/mnt/ssd/jsj/patient/results/method/short"
        else:
            # All other approaches keep using the Matrix directory.
            output_dir = os.path.join(self.config.OUTPUT_DIR, "Matrix")

        # Make sure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

        return os.path.join(
            output_dir,
            f"experiment_results_{model_name}_window_{window_size}.json"
        )

    def _get_checkpoint_file(self, window_size: Union[int, str]) -> str:
        """
        Build the checkpoint-file path for this experiment group.

        Args:
            window_size: context-window size.

        Returns:
            Path of the checkpoint JSON file.
        """
        model_name = self.get_model_name_from_path(self.config.MODEL_PATH)
        return os.path.join(
            self.checkpoint_dir,
            f"checkpoint_{model_name}_window_{window_size}.json"
        )

    def _save_checkpoint(self, window_size: Union[int, str], processed_count: int, total_count: int) -> None:
        """
        Persist resume information for this experiment group.

        Args:
            window_size: context-window size.
            processed_count: number of samples processed so far (absolute).
            total_count: total number of samples.
        """
        checkpoint_file = self._get_checkpoint_file(window_size)
        checkpoint_data = {
            "window_size": window_size,
            "processed_count": processed_count,
            "total_count": total_count,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "model_path": self.config.MODEL_PATH
        }

        try:
            with open(checkpoint_file, 'w', encoding='utf-8') as f:
                json.dump(checkpoint_data, f, ensure_ascii=False, indent=2)
            logger.info(f"断点信息已保存到: {checkpoint_file}, 已处理: {processed_count}/{total_count}")
        except Exception as e:
            # Best-effort: a failed checkpoint write must not abort the run.
            logger.exception(f"保存断点信息失败: {str(e)}")

    def _load_checkpoint(self, window_size: Union[int, str]) -> dict:
        """
        Load resume information for this experiment group.

        Args:
            window_size: context-window size.

        Returns:
            Checkpoint dict, or an empty dict when absent/invalid.
        """
        checkpoint_file = self._get_checkpoint_file(window_size)

        if not os.path.exists(checkpoint_file):
            logger.info(f"未找到断点文件: {checkpoint_file}")
            return {}

        try:
            with open(checkpoint_file, 'r', encoding='utf-8') as f:
                checkpoint_data = json.load(f)

            # A checkpoint is only valid for the same model path.
            if checkpoint_data.get("model_path") != self.config.MODEL_PATH:
                logger.warning("断点对应的模型路径与当前模型路径不一致，将忽略断点")
                return {}

            logger.info(f"加载断点信息成功: 已处理 {checkpoint_data['processed_count']}/{checkpoint_data['total_count']}")
            return checkpoint_data
        except Exception as e:
            logger.exception(f"加载断点信息失败: {str(e)}")
            return {}

    def _process_batch_results(
        self,
        batch: List[Dict[str, Any]],
        generated_responses: List[str],
        batch_start_time: float,
        window_size: Union[int, str]
    ) -> List[Dict[str, Any]]:
        """
        Score one batch and package the per-sample results.

        Args:
            batch: the batch's experiment-data dicts.
            generated_responses: model outputs, aligned with `batch`.
            batch_start_time: wall-clock start of the batch.
            window_size: context-window size (unused here; kept for the
                call-site signature).

        Returns:
            List of result dicts; a failed sample yields an error record
            instead of aborting the batch.
        """
        batch_results = []

        for j, data in enumerate(batch):
            try:
                # Compute all evaluation metrics for this sample.
                metrics_result = self.evaluator.calculate_all_metrics(
                    reference=data["real_response"],
                    hypothesis=generated_responses[j],
                    config=self.config
                )

                # Merge metrics with the sample metadata (incl. doctor question).
                result = {
                    "patient_file": data["patient_file"],
                    "dialogue_turn": data["dialogue_turn"],
                    "context_strategy": data["context_strategy"],
                    "generated_response": generated_responses[j],
                    "real_response": data["real_response"],
                    "doctor_question": data.get("doctor_question", ""),
                    **metrics_result,
                    "processing_time": round(time.time() - batch_start_time, 2),
                    "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                }

                batch_results.append(result)
            except Exception as e:
                logger.exception(f"处理数据时发生错误: {str(e)}")
                # Use .get here: a missing key must not raise inside the
                # handler and abort the whole batch.
                error_result = {
                    "patient_file": data.get("patient_file", ""),
                    "dialogue_turn": data.get("dialogue_turn"),
                    "context_strategy": data.get("context_strategy"),
                    "doctor_question": data.get("doctor_question", ""),
                    "error": str(e)
                }
                batch_results.append(error_result)

        return batch_results

    def _save_batch_results(
        self,
        batch_results: List[Dict[str, Any]],
        output_file: str,
        experiment_info: Dict[str, Any]
    ) -> None:
        """
        Merge one batch's results into the output file and save it.

        Args:
            batch_results: batch results to append.
            output_file: JSON output-file path.
            experiment_info: metadata identifying the experiment group.
        """
        if not batch_results:
            return

        # Load any previously saved results; tolerate missing or broken files.
        existing_results = []
        if os.path.exists(output_file):
            try:
                with open(output_file, 'r', encoding='utf-8') as f:
                    existing_results = json.load(f)
                # Normalize to a list.
                if not isinstance(existing_results, list):
                    existing_results = [existing_results]
            except (json.JSONDecodeError, OSError):
                # File exists but is unreadable/invalid: start over.
                existing_results = []

        # Append to the matching experiment group, if present.
        experiment_updated = False
        for i, exp in enumerate(existing_results):
            if exp.get('experiment_id') == experiment_info['experiment_id']:
                exp['results'].extend(batch_results)
                exp['total_samples'] = len(exp['results'])
                # Refresh the aggregated metrics.
                exp['average_metrics'] = self.result_manager._calculate_average_metrics(exp['results'])
                experiment_updated = True
                break

        if not experiment_updated:
            # First batch for this group: create a new entry.
            new_experiment = experiment_info.copy()
            new_experiment['results'] = batch_results
            new_experiment['total_samples'] = len(batch_results)
            new_experiment['average_metrics'] = self.result_manager._calculate_average_metrics(batch_results)
            existing_results.append(new_experiment)

        # Save everything in one write.
        try:
            # ResultManager.save_results handles non-serializable objects.
            self.result_manager.save_results(existing_results, output_file)
        except Exception as e:
            # logger.exception includes the full traceback.
            logger.exception(f"批量保存实验结果失败: {str(e)}")