import os
import json
import logging
import hashlib
from typing import List, Dict, Any, Optional, Callable, Union, Tuple, Set

# 设置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class DataProcessor:
    """
    Data processor responsible for loading and preparing experiment data.

    Loads patient records from JSON files and, for each dialogue turn,
    prepares the context history using one of several strategies: the full
    history, a fixed-size recency window, or semantic retrieval of the most
    relevant past turns (backed by a sentence-transformers model and an
    LRU-bounded embedding cache).
    """

    # Minimum character count for both the doctor and patient utterances of
    # a turn to count as "high quality".
    MIN_DIALOGUE_LENGTH = 6
    # Default number of turns returned by semantic retrieval.
    DEFAULT_TOP_K = 3
    # Maximum number of entries kept in the embedding cache.
    CACHE_MAX_SIZE = 1000
    # Local path of the sentence-transformers model (loaded offline).
    LOCAL_MODEL_PATH = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"

    def __init__(self, config: Any):
        """
        Initialize the data processor.

        Args:
            config: Configuration object or dict. Accessed lazily — e.g.
                ``config.DEVICE`` is only read when the embedding model is
                loaded, and ``config.EXPERIMENT_CONFIG`` when top_k is looked
                up.
        """
        self.config = config
        # Lazily populated by get_semantic_search_function(): the imported
        # SentenceTransformer class and the loaded model instance.
        self._sentence_transformer = None
        self._model = None
        # Embedding cache; _cache_keys tracks access order for LRU eviction.
        self._cache: Dict[str, Dict[str, Any]] = {
            'dialogue_embeddings': {}
        }
        self._cache_keys: List[str] = []  # oldest key first (LRU order)

    def load_patient_data(self, data_dir: str, num_samples: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Load every JSON file under the given directory (recursively).

        Args:
            data_dir: Directory containing the JSON files.
            num_samples: Optional cap on the number of samples; None loads
                everything found.

        Returns:
            List of per-patient dicts holding the file name, basic info,
            personality, medical record, and the extracted dialogue turns.
            Returns an empty list (after logging) on any directory-level
            failure; individual unreadable files are skipped, not fatal.
        """
        data_list = []

        try:
            if not os.path.exists(data_dir):
                raise FileNotFoundError(f"数据目录不存在: {data_dir}")

            # Collect all JSON files under the directory tree.
            json_files = self._find_json_files(data_dir)

            if not json_files:
                logger.warning(f"数据目录中没有找到JSON文件: {data_dir}")
                return data_list

            # Parse each file; a malformed file is logged and skipped.
            for json_file in json_files:
                try:
                    with open(json_file, 'r', encoding='utf-8') as f:
                        data = json.load(f)
                        # Extract the core fields into a normalized record.
                        patient_data = {
                            "file_name": os.path.basename(json_file),
                            "基础信息": self._extract_info(data, "基础信息"),
                            "人格": self._extract_info(data, "人格"),
                            "门诊病历": self._extract_info(data, "门诊病历"),
                            "门诊对话": self._extract_dialogues(data)
                        }
                        data_list.append(patient_data)
                        logger.debug(f"成功加载数据: {os.path.basename(json_file)}")
                except json.JSONDecodeError as e:
                    logger.error(f"解析JSON文件失败: {json_file}, 错误: {str(e)}")
                except Exception as e:
                    logger.error(f"读取文件失败: {json_file}, 错误: {str(e)}")
        except Exception as e:
            logger.error(f"加载数据时发生错误: {str(e)}")

        # Apply the optional sample-count limit.
        if num_samples is not None and len(data_list) > num_samples:
            data_list = data_list[:num_samples]
            logger.info(f"限制样本数量为: {num_samples}")

        return data_list

    def prepare_context_data(
        self, 
        patient_data: Dict[str, Any], 
        window_size: Union[int, str], 
        semantic_search_func: Optional[Callable] = None
    ) -> List[Dict[str, Any]]:
        """
        Prepare per-turn context data under the given window strategy.

        Args:
            patient_data: One record as produced by load_patient_data.
            window_size: Either a positive int (fixed recency window) or a
                string strategy name ("full", "semantic").
            semantic_search_func: Retrieval function, used only when
                window_size == "semantic".

        Returns:
            One entry per qualifying dialogue turn (starting from the second
            turn), each containing the chosen context history, the doctor's
            question, and the patient's real response.
        """
        prepared_data = []
        dialogues = patient_data["门诊对话"]

        # At least two turns are needed: the first can only ever be context.
        if len(dialogues) < 2:
            logger.warning(f"数据 {patient_data['file_name']} 的对话轮次不足，无法准备上下文数据")
            return prepared_data

        # Build one sample per turn, starting from the second turn.
        for i in range(1, len(dialogues)):
            # Current turn's doctor question and patient response.
            doctor_question = dialogues[i].get("医生", "")
            patient_response = dialogues[i].get("患者", "")

            # Keep only high-quality turns (non-empty, long enough).
            if not self._is_high_quality_dialogue(doctor_question, patient_response):
                logger.debug(f"跳过对话轮次 {i} (文件: {patient_data['file_name']})，因为对话质量不满足要求")
                continue

            # Pick the context history according to the window strategy.
            context_history = self._select_context_history(
                dialogues, i, window_size, semantic_search_func, doctor_question
            )

            # Assemble the record for this turn.
            current_data = self._prepare_current_data(
                patient_data, context_history, doctor_question, 
                patient_response, window_size, i
            )

            prepared_data.append(current_data)

        logger.debug(f"为文件 {patient_data['file_name']} 准备了 {len(prepared_data)} 条上下文数据")
        return prepared_data

    def _prepare_current_data(
        self, 
        patient_data: Dict[str, Any],
        context_history: List[Dict[str, str]],
        doctor_question: str,
        patient_response: str,
        window_size: Union[int, str],
        dialogue_turn: int
    ) -> Dict[str, Any]:
        """
        Assemble the record for one dialogue turn.

        Args:
            patient_data: The patient record (source of static fields).
            context_history: Context turns selected for this sample.
            doctor_question: The doctor's question for this turn.
            patient_response: The patient's real response for this turn.
            window_size: The strategy used (recorded as context_strategy).
            dialogue_turn: Index of this turn within the dialogue.

        Returns:
            The assembled per-turn sample dict.
        """
        return {
            "patient_file": patient_data["file_name"],
            "基础信息": patient_data["基础信息"],
            "人格": patient_data["人格"],
            "门诊病历": patient_data["门诊病历"],
            "context_history": context_history,
            "doctor_question": doctor_question,
            "real_response": patient_response,
            "context_strategy": window_size,
            "dialogue_turn": dialogue_turn
        }

    def _select_context_history(
        self,
        dialogues: List[Dict[str, str]],
        current_index: int,
        window_size: Union[int, str],
        semantic_search_func: Optional[Callable],
        current_question: str
    ) -> List[Dict[str, str]]:
        """
        Select the context history for a turn according to the window strategy.

        Args:
            dialogues: All dialogue turns.
            current_index: Index of the current turn (always excluded).
            window_size: "full", "semantic", or a positive int.
            semantic_search_func: Retrieval function for the "semantic" case.
            current_question: Query text for semantic retrieval.

        Returns:
            The selected context turns. Unrecognized strategies — including
            "semantic" without a search function — fall back to the last
            two turns.
        """
        if window_size == "full":
            # All preceding turns.
            return dialogues[:current_index]
        elif isinstance(window_size, int) and window_size > 0:
            # Fixed-size window of the most recent turns.
            start_idx = max(0, current_index - window_size)
            return dialogues[start_idx:current_index]
        elif window_size == "semantic" and semantic_search_func:
            # Most relevant preceding turns by semantic similarity.
            history_dialogues = dialogues[:current_index]
            return semantic_search_func(history_dialogues, current_question)
        else:
            # Default: the two most recent turns.
            start_idx = max(0, current_index - 2)
            return dialogues[start_idx:current_index]

    def _is_high_quality_dialogue(self, doctor_question: str, patient_response: str) -> bool:
        """
        Decide whether a turn is high quality.

        Args:
            doctor_question: The doctor's utterance.
            patient_response: The patient's utterance.

        Returns:
            True iff both utterances are non-blank and at least
            MIN_DIALOGUE_LENGTH characters long after stripping.
        """
        # Reject blank utterances.
        if not doctor_question or not doctor_question.strip():
            logger.debug("医生问题为空，跳过此对话")
            return False
        if not patient_response or not patient_response.strip():
            logger.debug("患者回答为空，跳过此对话")
            return False

        # Both utterances must satisfy the minimum length.
        doctor_len = len(doctor_question.strip())
        patient_len = len(patient_response.strip())

        return doctor_len >= self.MIN_DIALOGUE_LENGTH and patient_len >= self.MIN_DIALOGUE_LENGTH

    def _extract_info(self, data: Dict[str, Any], key: str) -> str:
        """
        Extract a field from the raw data as a string.

        Args:
            data: Raw record.
            key: Field name to extract.

        Returns:
            The field rendered as a string (scalars via str(), containers
            via JSON with non-ASCII preserved), or "未知" when the field is
            missing, None, or of an unexpected type.
        """
        if key in data and data[key] is not None:
            value = data[key]
            if isinstance(value, (str, int, float, bool)):
                return str(value)
            elif isinstance(value, (list, dict)):
                try:
                    return json.dumps(value, ensure_ascii=False)
                except Exception:
                    return str(value)
        return "未知"

    def _extract_dialogues(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        """
        Extract doctor/patient dialogue turns from the raw data.

        Args:
            data: Raw record.

        Returns:
            List of {"医生": str, "患者": str} dicts. Turns missing either
            key are dropped; None values become empty strings.
        """
        dialogues = []

        if "门诊对话" in data and isinstance(data["门诊对话"], list):
            for turn in data["门诊对话"]:
                if isinstance(turn, dict) and "医生" in turn and "患者" in turn:
                    dialogues.append({
                        "医生": str(turn["医生"]) if turn["医生"] is not None else "",
                        "患者": str(turn["患者"]) if turn["患者"] is not None else ""
                    })

        return dialogues

    def _find_json_files(self, data_dir: str) -> List[str]:
        """
        Find every JSON file under a directory, recursively.

        Args:
            data_dir: Root directory to walk.

        Returns:
            List of full paths to files whose name ends with .json
            (case-insensitive).
        """
        json_files = []
        # Walk the directory tree, including subdirectories.
        for root, _, files in os.walk(data_dir):
            for file in files:
                if file.lower().endswith('.json'):
                    json_files.append(os.path.join(root, file))

        logger.info(f"在目录 {data_dir} 中找到 {len(json_files)} 个JSON文件")
        return json_files

    def get_semantic_search_function(self) -> Optional[Callable]:
        """
        Build the cached-embedding semantic search function.

        Returns:
            The search callable, or None when the required libraries or the
            local model are unavailable.
        """
        try:
            # Deferred imports so the rest of the class has no hard
            # dependency on torch / sentence-transformers.
            import torch
            from sentence_transformers import SentenceTransformer

            # Cache the imported class for _load_sentence_transformer_model.
            if self._sentence_transformer is None:
                self._sentence_transformer = SentenceTransformer

            # Load the embedding model once.
            if self._model is None:
                self._model = self._load_sentence_transformer_model()
                if self._model is None:
                    return None

            # Resolve top_k from configuration.
            top_k = self._get_top_k_from_config()

            # Build the optimized search closure.
            semantic_search = self._create_optimized_semantic_search(self._model, top_k)

            return semantic_search
        except ImportError as e:
            logger.error(f"无法导入语义检索所需的库: {str(e)}")
            return None

    def _load_sentence_transformer_model(self) -> Optional[Any]:
        """
        Load the sentence-transformers model from the local path.

        Returns:
            The model instance, or None when the path is missing or loading
            fails. Requires self._sentence_transformer to have been set by
            get_semantic_search_function().
        """
        try:
            import torch  # availability check; device handling is driven by config

            # Force offline mode so no download is attempted.
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            os.environ['HF_HUB_OFFLINE'] = '1'
            # Device comes from the configuration object.
            device = self.config.DEVICE
            logger.info(f"使用设备: {device} 加载sentence-transformers模型")

            if os.path.exists(self.LOCAL_MODEL_PATH):
                model = self._sentence_transformer(self.LOCAL_MODEL_PATH, device=device)
                logger.info(f"成功加载本地sentence-transformers模型: {self.LOCAL_MODEL_PATH}")
                return model
            else:
                logger.error(f"本地模型路径不存在: {self.LOCAL_MODEL_PATH}")
                return None
        except Exception as e:
            logger.error(f"加载sentence-transformers模型失败: {str(e)}")
            return None

    def _get_top_k_from_config(self) -> int:
        """
        Read the semantic-search top_k from the configuration.

        Supports both dict-like (``.get``) and attribute-style config
        objects at each level.

        Returns:
            The configured top_k, or DEFAULT_TOP_K when absent or on error.
        """
        try:
            if hasattr(self.config, 'EXPERIMENT_CONFIG'):
                semantic_config = getattr(self.config, 'EXPERIMENT_CONFIG')
                if hasattr(semantic_config, 'get'):
                    search_config = semantic_config.get("semantic_search", {})
                else:
                    search_config = getattr(semantic_config, 'semantic_search', {})

                if hasattr(search_config, 'get'):
                    return search_config.get("top_k", self.DEFAULT_TOP_K)
                else:
                    return getattr(search_config, 'top_k', self.DEFAULT_TOP_K)
            return self.DEFAULT_TOP_K
        except Exception as e:
            logger.warning(f"无法获取语义搜索配置，使用默认值top_k={self.DEFAULT_TOP_K}: {str(e)}")
            return self.DEFAULT_TOP_K

    def _create_optimized_semantic_search(self, model: Any, top_k: int = DEFAULT_TOP_K) -> Callable:
        """
        Create the semantic search closure.

        Args:
            model: sentence-transformers model instance.
            top_k: Number of most-relevant turns to return.

        Returns:
            A callable (dialogues, query) -> most relevant turns in their
            original order.
        """
        # Early dependency check only: get_semantic_search_function() wraps
        # this call in try/except ImportError, so a missing scikit-learn
        # yields None instead of a search function that fails later. The
        # similarity code imports this name again where it is actually used
        # (_calculate_top_similar_indices) — the original relied on this
        # function-local binding from another method's scope, which raised
        # NameError at query time.
        from sklearn.metrics.pairwise import cosine_similarity  # noqa: F401

        def optimized_semantic_search(dialogues: List[Dict[str, str]], query: str) -> List[Dict[str, str]]:
            """
            Semantic search over past turns with embedding caching to avoid
            recomputing embeddings for the same dialogue prefix.
            """
            if not dialogues:
                return []

            # Stable cache key for this dialogue set.
            dialogues_id = self._get_dialogues_identifier(dialogues)

            # Fetch cached embeddings or compute and cache them.
            dialogue_embeddings = self._get_or_compute_dialogue_embeddings(dialogues, dialogues_id, model)
            if dialogue_embeddings is None:
                return []

            # Embed the query.
            query_embedding = model.encode([query], show_progress_bar=False, convert_to_tensor=False)

            # Pick the similarity strategy based on pool size.
            top_indices = self._calculate_top_similar_indices(
                dialogue_embeddings, query_embedding, top_k, len(dialogues)
            )

            # Return the most relevant turns in chronological order.
            return [dialogues[idx] for idx in sorted(top_indices)]

        return optimized_semantic_search

    def _get_dialogues_identifier(self, dialogues: List[Dict[str, str]]) -> str:
        """
        Build a deterministic identifier for a dialogue set.

        Args:
            dialogues: Dialogue turns.

        Returns:
            MD5 hex digest over the concatenated doctor/patient texts
            (used only as a cache key, not for security).
        """
        dialogues_str = str([f"{d.get('医生', '')}|{d.get('患者', '')}" for d in dialogues])
        return hashlib.md5(dialogues_str.encode()).hexdigest()

    def _get_or_compute_dialogue_embeddings(
        self, dialogues: List[Dict[str, str]], dialogues_id: str, model: Any
    ) -> Optional[List[List[float]]]:
        """
        Return cached embeddings for a dialogue set, computing them on miss.

        Args:
            dialogues: Dialogue turns to embed.
            dialogues_id: Cache key for this dialogue set.
            model: Embedding model.

        Returns:
            The embeddings (as returned by model.encode), or None when
            encoding fails.
        """
        # Cache hit: refresh LRU position and return.
        if dialogues_id in self._cache['dialogue_embeddings']:
            if dialogues_id in self._cache_keys:
                self._cache_keys.remove(dialogues_id)
            self._cache_keys.append(dialogues_id)
            return self._cache['dialogue_embeddings'][dialogues_id]

        try:
            # Encode each turn as a single "doctor + patient" text.
            dialogue_texts = [f"医生: {d.get('医生', '')}\n患者: {d.get('患者', '')}" for d in dialogues]
            dialogue_embeddings = model.encode(
                dialogue_texts, show_progress_bar=False, convert_to_tensor=False
            )

            # Evict before inserting so the cache stays within bounds.
            self._manage_cache_size()

            # Store the new entry as the most recently used.
            self._cache['dialogue_embeddings'][dialogues_id] = dialogue_embeddings
            self._cache_keys.append(dialogues_id)

            return dialogue_embeddings
        except Exception as e:
            logger.error(f"计算对话嵌入失败: {str(e)}")
            return None

    def _manage_cache_size(self) -> None:
        """
        Evict the least-recently-used entry when the cache is full.

        Called before each insertion; uses >= so the cache never exceeds
        CACHE_MAX_SIZE entries after the insert (the previous ``>`` check
        let it settle at CACHE_MAX_SIZE + 1).
        """
        if len(self._cache['dialogue_embeddings']) >= self.CACHE_MAX_SIZE:
            oldest_key = self._cache_keys.pop(0)
            del self._cache['dialogue_embeddings'][oldest_key]
            logger.debug(f"缓存大小超过限制，移除最旧的条目: {oldest_key}")

    def _calculate_top_similar_indices(
        self, 
        dialogue_embeddings: List[List[float]],
        query_embedding: List[List[float]],
        top_k: int,
        dialogue_count: int
    ) -> List[int]:
        """
        Compute the indices of the turns most similar to the query.

        Args:
            dialogue_embeddings: Per-turn embeddings.
            query_embedding: Query embedding (batch of one).
            top_k: Number of indices to return.
            dialogue_count: Number of turns (len of dialogue_embeddings).

        Returns:
            Up to top_k indices ordered most-similar first. On any failure
            in the vectorized path, falls back to the most recent turns.
        """
        if dialogue_count < top_k * 2:
            # Small pools: plain dot products, no extra dependencies.
            similarities = []
            for i, dialogue_embedding in enumerate(dialogue_embeddings):
                # Guard against empty embeddings.
                if len(dialogue_embedding) > 0 and len(query_embedding[0]) > 0:
                    # Truncate to the common length before the dot product.
                    min_len = min(len(dialogue_embedding), len(query_embedding[0]))
                    sim = sum(a * b for a, b in zip(
                        dialogue_embedding[:min_len], query_embedding[0][:min_len]
                    ))
                    similarities.append((sim, i))
                else:
                    similarities.append((0, i))  # invalid embedding -> similarity 0

            # Sort by similarity descending and keep the top_k indices.
            similarities.sort(reverse=True, key=lambda x: x[0])
            return [idx for _, idx in similarities[:top_k]]
        else:
            # Larger pools: vectorized cosine similarity. Imported here —
            # where it is used — instead of relying on a binding local to
            # _create_optimized_semantic_search, which this method could
            # never see (the original always hit the fallback below via
            # NameError).
            try:
                from sklearn.metrics.pairwise import cosine_similarity
                similarities = cosine_similarity(query_embedding, dialogue_embeddings)[0]
                # Never request more indices than there are turns.
                actual_top_k = min(top_k, dialogue_count)
                # argsort ascending -> last k reversed = most similar first;
                # tolist() honors the declared List[int] return type.
                return similarities.argsort()[-actual_top_k:][::-1].tolist()
            except Exception as e:
                logger.error(f"计算余弦相似度失败: {str(e)}")
                # On failure, fall back to the most recent turns.
                return list(range(max(0, dialogue_count - top_k), dialogue_count))

# 为了兼容旧的函数式调用方式，提供全局函数
def prepare_context_data(patient_data: Dict[str, Any], window_size: Union[int, str], 
                        semantic_search_func: Optional[Callable] = None) -> List[Dict[str, Any]]:
    """
    Legacy function-style entry point kept for backward compatibility.

    Delegates to DataProcessor.prepare_context_data on a throwaway
    processor instance built with a minimal placeholder config.

    Args:
        patient_data: Patient record.
        window_size: Context window strategy (int, "full", or "semantic").
        semantic_search_func: Optional semantic retrieval function.

    Returns:
        The prepared per-turn context data.
    """
    # A bare placeholder stands in for the real config object; none of its
    # attributes are touched on this code path.
    placeholder_config = type("DummyConfig", (), {})()
    processor = DataProcessor(placeholder_config)
    return processor.prepare_context_data(patient_data, window_size, semantic_search_func)