#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
semantic实验组性能优化方案

此脚本提供了data_processor.py中语义搜索功能的优化版本，主要解决semantic实验组运行缓慢的问题。
"""
import logging
import functools
from typing import List, Dict, Any, Optional

# Configure module-wide logging (INFO level, timestamped lines)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def memoize(func):
    """
    简单的缓存装饰器，用于缓存函数调用结果
    """
    cache = {}    
    def wrapper(*args, **kwargs):
        # 创建一个可哈希的键
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper


def optimized_semantic_search_factory(model, top_k=3):
    """
    Create an optimized semantic-search function bound to *model*.

    Args:
        model: sentence-transformers model instance (anything exposing
            ``encode(texts, show_progress_bar=..., convert_to_tensor=...)``).
        top_k: number of most relevant dialogue turns to return.

    Returns:
        A function ``(dialogues, query) -> list`` returning up to ``top_k``
        dialogue turns, preserved in their original order.
    """
    # Per-collection embedding cache: repeated queries against the same
    # dialogue collection skip the expensive encode() call.
    # NOTE(review): the cache is unbounded -- acceptable for a single
    # experiment run, but consider an LRU bound in long-lived processes.
    dialogue_embeddings_cache = {}

    def _cosine(u, v):
        # Cosine similarity of two equal-length vectors; 0.0 if either is zero.
        dot = sum(a * b for a, b in zip(u, v))
        norm_u = sum(a * a for a in u) ** 0.5
        norm_v = sum(b * b for b in v) ** 0.5
        if norm_u == 0.0 or norm_v == 0.0:
            return 0.0
        return dot / (norm_u * norm_v)

    def optimized_semantic_search(dialogues, query):
        """
        Optimized semantic search: caches dialogue embeddings per collection
        to avoid recomputing them on repeated queries.
        """
        if not dialogues:
            return []

        # Identify the dialogue collection; hash(str(...)) is stable within
        # a single process, which is all an in-memory cache needs.
        dialogues_id = str(len(dialogues)) + '-' + str(hash(str(dialogues)))

        # Compute (or fetch cached) embeddings for the whole collection.
        if dialogues_id not in dialogue_embeddings_cache:
            dialogue_texts = [f"医生: {d['医生']}\n患者: {d['患者']}" for d in dialogues]
            # Batch-encode every dialogue turn in a single call.
            dialogue_embeddings = model.encode(dialogue_texts, show_progress_bar=False, convert_to_tensor=False)
            dialogue_embeddings_cache[dialogues_id] = dialogue_embeddings
        else:
            dialogue_embeddings = dialogue_embeddings_cache[dialogues_id]

        # The query embedding is cheap relative to the collection, so it is
        # recomputed on every call.
        query_embedding = model.encode([query], show_progress_bar=False, convert_to_tensor=False)

        if len(dialogues) < top_k * 2:
            # Small collection: pure-Python cosine similarity, no sklearn.
            # Fix: the previous code used a raw dot product here but cosine
            # similarity in the large branch, so the two paths could rank
            # differently for unnormalized embeddings.
            scored = [
                (_cosine(query_embedding[0], emb), i)
                for i, emb in enumerate(dialogue_embeddings)
            ]
            scored.sort(reverse=True, key=lambda x: x[0])
            top_indices = [idx for _, idx in scored[:top_k]]
        else:
            # Large collection: use sklearn's vectorized cosine_similarity.
            from sklearn.metrics.pairwise import cosine_similarity
            similarities = cosine_similarity(query_embedding, dialogue_embeddings)[0]
            top_indices = similarities.argsort()[-top_k:][::-1]  # indices of the top_k matches

        # Return the most relevant turns in their original order.
        return [dialogues[idx] for idx in sorted(top_indices)]

    return optimized_semantic_search


def create_optimized_data_processor_patch():
    """
    Return the optimization patch code for data_processor.py as a string.

    The patch text is a raw, triple-single-quoted string: single quotes so
    the embedded triple-double-quoted docstrings cannot terminate it (the
    previous double-quote delimiter made this module a SyntaxError), and raw
    so the ``\\n`` escapes inside the embedded f-strings survive as two
    characters instead of becoming real newlines in the generated code.
    """
    patch_code = r'''
# 语义搜索功能优化补丁

# 在get_semantic_search_function方法中，替换原来的实现为优化版
    def get_semantic_search_function(self):
        """创建优化版的语义检索函数"""
        try:
            from sentence_transformers import SentenceTransformer
            import numpy as np
            from sklearn.metrics.pairwise import cosine_similarity
            
            # 加载句子嵌入模型
            # 使用本地sentence-transformers模型
            local_model_path = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"
            try:
                # 尝试以离线模式加载模型
                import os
                import torch
                os.environ['TRANSFORMERS_OFFLINE'] = '1'
                os.environ['HF_HUB_OFFLINE'] = '1'
                device = "cuda" if torch.cuda.is_available() else "cpu"
                model = SentenceTransformer(local_model_path, device=device)
                logger.info(f"成功加载本地sentence-transformers模型: {local_model_path}")
            except Exception as e:
                logger.error(f"加载sentence-transformers模型失败: {str(e)}")
                return None
            
            # 获取top_k参数，添加防御性编程
            try:
                semantic_config = getattr(self.config, 'EXPERIMENT_CONFIG', {})
                if hasattr(semantic_config, 'get'):
                    search_config = semantic_config.get("semantic_search", {})
                else:
                    search_config = getattr(semantic_config, 'semantic_search', {})
                
                if hasattr(search_config, 'get'):
                    top_k = search_config.get("top_k", 3)
                else:
                    top_k = getattr(search_config, 'top_k', 3)
            except Exception as e:
                logger.warning(f"无法获取语义搜索配置，使用默认值top_k=3: {str(e)}")
                top_k = 3
            
            # 使用优化版的语义搜索函数工厂
            semantic_search = self._create_optimized_semantic_search(model, top_k)
            
            return semantic_search
        except ImportError as e:
            logger.error(f"无法导入语义检索所需的库: {str(e)}")
            return None
            
    def _create_optimized_semantic_search(self, model, top_k=3):
        """
        创建优化版的语义搜索函数
        
        Args:
            model: sentence-transformers模型实例
            top_k: 返回的最相关对话数量
            
        Returns:
            优化版的语义搜索函数
        """
        # 为每个对话集合创建单独的缓存
        dialogue_embeddings_cache = {}
        
        def optimized_semantic_search(dialogues, query):
            """
            优化版的语义搜索函数，添加缓存机制减少重复计算
            """
            if not dialogues:
                return []
            
            # 为对话集合创建唯一标识符
            dialogues_id = str(len(dialogues)) + '-' + str(hash(str(dialogues)))
            
            # 计算对话嵌入（如果缓存中不存在）
            if dialogues_id not in dialogue_embeddings_cache:
                dialogue_texts = [f"医生: {d['医生']}\n患者: {d['患者']}" for d in dialogues]
                # 批量计算所有对话的嵌入
                dialogue_embeddings = model.encode(dialogue_texts, show_progress_bar=False, convert_to_tensor=False)
                dialogue_embeddings_cache[dialogues_id] = dialogue_embeddings
            else:
                dialogue_embeddings = dialogue_embeddings_cache[dialogues_id]
            
            # 计算查询嵌入
            query_embedding = model.encode([query], show_progress_bar=False, convert_to_tensor=False)
            
            # 如果对话数量很少，直接计算相似度
            if len(dialogues) < top_k * 2:
                # 使用更轻量级的相似度计算方法
                similarities = []
                for i, dialogue_embedding in enumerate(dialogue_embeddings):
                    # 简单的向量点积计算相似度
                    sim = sum(a * b for a, b in zip(query_embedding[0], dialogue_embedding))
                    similarities.append((sim, i))
                
                # 排序并选择top_k
                similarities.sort(reverse=True, key=lambda x: x[0])
                top_indices = [idx for _, idx in similarities[:top_k]]
            else:
                # 对于大量对话，使用scikit-learn的cosine_similarity
                similarities = cosine_similarity(query_embedding, dialogue_embeddings)[0]
                top_indices = similarities.argsort()[-top_k:][::-1]  # 获取top_k个最相关的索引
            
            # 返回最相关的对话轮次（保持原始顺序）
            return [dialogues[idx] for idx in sorted(top_indices)]
        
        return optimized_semantic_search
'''
    
    return patch_code


if __name__ == "__main__":
    import sys

    banner = "=" * 80

    # Print a summary of the optimization plan.
    print(banner)
    print("semantic实验组性能优化方案")
    print(banner)

    print("\n问题分析:")
    for line in (
        "1. 语义搜索功能为每个对话轮次单独计算嵌入向量，没有缓存机制",
        "2. 随着对话轮次增加，计算量呈指数级增长",
        "3. 模型编码操作相对耗时",
    ):
        print(line)

    print("\n优化方案:")
    for line in (
        "1. 添加对话嵌入向量缓存，避免重复计算",
        "2. 对不同规模的对话集合使用不同的相似度计算策略",
        "3. 优化模型编码参数，提高计算效率",
    ):
        print(line)

    print("\n使用方法:")
    print("1. 将本脚本中的优化代码应用到data_processor.py文件中")
    print("2. 或者直接运行: python semantic_optimization.py apply")

    # With the "apply" argument, dump the patch text to a file.
    if len(sys.argv) > 1 and sys.argv[1] == "apply":
        print("\n正在应用优化补丁...")
        patch_code = create_optimized_data_processor_patch()

        # Save the patch so it can be applied to data_processor.py by hand.
        with open("data_processor_patch.txt", "w", encoding="utf-8") as f:
            f.write(patch_code)

        print("优化补丁已保存到 data_processor_patch.txt")
        print("请手动将补丁内容应用到 data_processor.py 文件中的相应位置")

    print("\n" + banner)