# -*- coding: utf-8 -*-

"""测试semantic搜索功能的脚本，用于诊断性能问题"""

import os
import sys
import time
import json
import logging
import traceback
from datetime import datetime

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from config import Config
from data_processor import DataProcessor

# 设置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class SemanticSearchTester:
    """Test harness for the semantic-search feature, used to diagnose performance issues."""

    # Shared mock doctor/patient dialogue fixture used by both the search-performance
    # and the encoding-performance tests (previously duplicated in each method).
    _MOCK_DIALOGUES = [
        {"医生": "你好，请问哪里不舒服？", "患者": "我最近感觉头痛，发热。"},
        {"医生": "有多久了？", "患者": "大概三天了。"},
        {"医生": "有服药吗？", "患者": "有吃一些退烧药，但效果不明显。"}
    ]

    def __init__(self):
        """Initialize the tester with the project config and data processor."""
        self.config = Config()
        self.data_processor = DataProcessor(self.config)
        self.semantic_search_func = None  # callable(dialogues, query) -> results; set by load_semantic_search()
        self.test_data = None  # list of patient records; set by load_test_data()
        self.model = None  # SentenceTransformer instance when the model is loaded manually

    def check_config_files(self):
        """Log whether each expected project script file exists on disk."""
        logger.info("=== 配置文件完整性检查 ===")
        config_files = [
            '/mnt/ssd/jsj/patient/script/config.py',
            '/mnt/ssd/jsj/patient/script/run_experiment.py',
            '/mnt/ssd/jsj/patient/script/experiment_manager.py',
            '/mnt/ssd/jsj/patient/script/data_processor.py'
        ]

        for file_path in config_files:
            if os.path.exists(file_path):
                logger.info(f"配置文件存在: {file_path}")
            else:
                logger.error(f"配置文件不存在: {file_path}")

    def _resolve_top_k(self, default=3):
        """Read the semantic-search ``top_k`` setting from config.

        Looks first for a ``semantic_search`` attribute on the config object,
        then for an ``EXPERIMENT_CONFIG['semantic_search']['top_k']`` entry.

        Args:
            default: Value returned when no setting can be read.

        Returns:
            int: The configured ``top_k``, or *default* on any failure.
        """
        try:
            search_config = getattr(self.config, 'semantic_search', None)
            if search_config:
                return getattr(search_config, 'top_k', default)
            return self.config.EXPERIMENT_CONFIG.get('semantic_search', {}).get('top_k', default)
        except Exception as e:
            logger.warning(f"无法获取语义搜索配置，使用默认值top_k=3: {str(e)}")
            return default

    def load_semantic_search(self):
        """Load the semantic-search callable into ``self.semantic_search_func``.

        First tries ``DataProcessor.get_semantic_search_function()``; if that
        yields nothing, falls back to loading the sentence-transformers model
        manually and asking the data processor to build an optimized search
        function around it.

        Returns:
            bool: True when a usable search function was obtained.
        """
        logger.info("正在加载语义搜索功能...")
        start_time = time.time()

        try:
            # Preferred path: let the data processor build the function itself.
            logger.info("尝试使用get_semantic_search_function方法...")
            self.semantic_search_func = self.data_processor.get_semantic_search_function()

            if self.semantic_search_func is not None:
                elapsed = time.time() - start_time
                logger.info(f"语义搜索功能通过get_semantic_search_function方法加载完成，耗时: {elapsed:.2f}秒")
                return True

            # Fallback path: load the model manually and build the search function.
            logger.info("get_semantic_search_function方法失败，尝试手动加载模型...")

            # Imported lazily so the script still starts when these heavy
            # dependencies are absent and the preferred path succeeds.
            from sentence_transformers import SentenceTransformer
            import torch

            local_model_path = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"
            device = "cuda" if torch.cuda.is_available() else "cpu"
            logger.info(f"手动加载模型到设备: {device}")

            model_start_time = time.time()
            self.model = SentenceTransformer(local_model_path, device=device)
            model_elapsed = time.time() - model_start_time
            logger.info(f"模型加载耗时: {model_elapsed:.2f}秒")

            top_k = self._resolve_top_k()

            if not hasattr(self.data_processor, '_create_optimized_semantic_search'):
                logger.error("DataProcessor没有_create_optimized_semantic_search方法")
                return False

            self.semantic_search_func = self.data_processor._create_optimized_semantic_search(self.model, top_k)

            # Check for failure *before* logging success (the original logged a
            # successful load even when the factory returned None).
            if self.semantic_search_func is None:
                logger.error("无法创建语义搜索功能")
                return False

            elapsed = time.time() - start_time
            logger.info(f"语义搜索功能通过手动加载模型创建完成，耗时: {elapsed:.2f}秒")
            return True
        except Exception as e:
            logger.error(f"加载语义搜索功能时出错: {str(e)}")
            traceback.print_exc()
            return False

    def load_test_data(self):
        """Load a single sample of patient data and dump its structure for debugging.

        Returns:
            bool: True when the data was loaded successfully.
        """
        logger.info(f"正在从 {self.config.DATA_DIR} 加载测试数据...")

        try:
            # Load only a minimal sample — this is a diagnostic run, not a full experiment.
            self.test_data = self.data_processor.load_patient_data(self.config.DATA_DIR, num_samples=1)
            logger.info(f"成功加载 {len(self.test_data)} 条测试数据")

            # Dump the first record so its schema can be inspected offline.
            if self.test_data:
                logger.info(f"第一条数据的键: {self.test_data[0].keys()}")
                with open('data_structure.log', 'w', encoding='utf-8') as f:
                    json.dump(self.test_data[0], f, ensure_ascii=False, indent=2)
                logger.info("数据结构已保存到 data_structure.log")
            return True
        except Exception as e:
            logger.error(f"加载测试数据时出错: {str(e)}")
            traceback.print_exc()
            return False

    def _timed_search(self, dialogues, query, label):
        """Run one semantic search, logging its latency and result count.

        Args:
            dialogues: List of dialogue dicts to search over.
            query: Query string.
            label: Prefix for the log message (e.g. "首次" / "缓存后").
        """
        start_time = time.time()
        try:
            results = self.semantic_search_func(dialogues, query)
            elapsed = time.time() - start_time
            logger.info(f"{label}搜索耗时: {elapsed:.4f}秒，返回 {len(results)} 条结果")
        except Exception as e:
            logger.error(f"{label}搜索出错: {str(e)}")
            traceback.print_exc()

    def test_semantic_search_performance(self):
        """Time semantic search over mock dialogue sets of increasing size.

        Each size is searched twice so a caching layer (if any) shows up as a
        faster second run. Requires ``load_semantic_search()`` to have succeeded.
        """
        if not self.semantic_search_func:
            logger.error("无法进行测试，缺少语义搜索函数")
            return

        logger.info("开始测试语义搜索性能...")

        mock_dialogues = self._MOCK_DIALOGUES
        logger.info("使用模拟对话数据进行测试")

        # Keep the sizes small so the diagnostic finishes quickly.
        test_sizes = [3, 5]

        for size in test_sizes:
            # Replicate the fixture when more dialogues are requested than exist.
            if size > len(mock_dialogues):
                extended_dialogues = mock_dialogues * (size // len(mock_dialogues) + 1)
                test_dialogues = extended_dialogues[:size]
            else:
                test_dialogues = mock_dialogues[:size]

            query = "患者有什么症状？"  # simple test query

            logger.info(f"测试对话数量: {size}")

            # First search (cold — no cache), then an identical second search
            # (warm — any cache should make it faster).
            self._timed_search(test_dialogues, query, "首次")
            self._timed_search(test_dialogues, query, "缓存后")

    def test_dialogue_encoding(self):
        """Measure raw sentence-transformers encoding latency on mock dialogues.

        Loads the model directly (mirroring what data_processor does) and times
        a single ``encode`` call, logging CUDA availability along the way.
        """
        logger.info("开始测试对话编码性能...")

        mock_dialogues = self._MOCK_DIALOGUES

        logger.info("使用模拟对话数据进行编码测试")

        try:
            from sentence_transformers import SentenceTransformer
            import torch

            # Mirror the model loading done inside data_processor.
            local_model_path = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"
            device = "cuda" if torch.cuda.is_available() else "cpu"
            logger.info(f"使用设备: {device}")

            # Log the CUDA environment so slow runs can be attributed to CPU fallback.
            logger.info(f"CUDA可用: {torch.cuda.is_available()}")
            if torch.cuda.is_available():
                logger.info(f"CUDA设备数: {torch.cuda.device_count()}")
                logger.info(f"当前CUDA设备: {torch.cuda.current_device()}")
                logger.info(f"CUDA设备名称: {torch.cuda.get_device_name(0)}")

            start_time = time.time()
            self.model = SentenceTransformer(local_model_path, device=device)
            elapsed = time.time() - start_time
            logger.info(f"模型加载耗时: {elapsed:.2f}秒")

            # Flatten each dialogue turn into one "doctor\npatient" text.
            dialogue_texts = [f"医生: {d['医生']}\n患者: {d['患者']}" for d in mock_dialogues]
            logger.info(f"准备了 {len(dialogue_texts)} 条对话文本进行编码测试")

            start_time = time.time()
            embeddings = self.model.encode(dialogue_texts, show_progress_bar=False, convert_to_tensor=False)
            elapsed = time.time() - start_time
            logger.info(f"编码耗时: {elapsed:.4f}秒")
            logger.info(f"嵌入向量形状: {embeddings.shape if hasattr(embeddings, 'shape') else f'{len(embeddings)}个向量'}")

        except Exception as e:
            logger.error(f"测试对话编码时出错: {str(e)}")
            traceback.print_exc()

    def check_experiment_config(self):
        """Log the experiment config and flag any entries mentioning 'semantic'."""
        logger.info("=== 实验配置检查 ===")
        logger.info(f"上下文窗口配置: {self.config.EXPERIMENT_CONFIG.get('context_windows')}")
        logger.info(f"GROUP_MAX_SAMPLES配置: {self.config.GROUP_MAX_SAMPLES}")

        # Scan instance attributes for any container config that could cause
        # the 'semantic' group to be scheduled.
        # NOTE(review): vars() only sees *instance* attributes — class-level
        # Config attributes would be missed; confirm against config.py.
        semantic_refs = []
        for key, value in vars(self.config).items():
            if isinstance(value, (dict, list, tuple)) and 'semantic' in str(value):
                semantic_refs.append(f"配置项 {key} 包含 'semantic': {value}")

        if semantic_refs:
            logger.warning(f"发现 {len(semantic_refs)} 处包含'semantic'的配置:")
            for ref in semantic_refs:
                logger.warning(ref)
        else:
            logger.info("未发现其他包含'semantic'的配置")

    def create_optimization_recommendations(self):
        """Log optimization recommendations and persist them to a text file."""
        logger.info("=== 优化建议 ===")
        recommendations = []

        # Only recommend removing the 'semantic' group when it is actually configured.
        if 'semantic' in self.config.GROUP_MAX_SAMPLES:
            recommendations.append("1. 从GROUP_MAX_SAMPLES配置中移除'semantic'项，避免实验尝试执行semantic组")

        recommendations.append("2. 优化语义搜索的批量处理逻辑，增加超时机制")
        recommendations.append("3. 为semantic搜索添加并行处理支持，提高处理速度")
        recommendations.append("4. 考虑使用轻量级模型替代现有的sentence-transformers模型")
        recommendations.append("5. 增加嵌入缓存机制，避免重复计算")

        for rec in recommendations:
            logger.info(rec)

        with open('semantic_optimization_recommendations.txt', 'w', encoding='utf-8') as f:
            f.write("\n".join(recommendations))
        logger.info("优化建议已保存到 semantic_optimization_recommendations.txt")

def main():
    """Run the full semantic-search diagnostic sequence end to end."""
    logger.info("===== Semantic搜索功能测试开始 =====")
    tester = SemanticSearchTester()

    # Environment checks first: file presence, then experiment configuration.
    tester.check_config_files()
    tester.check_experiment_config()

    # Data loading failure is non-fatal — the remaining tests use mock data.
    tester.load_test_data()

    # The search function is mandatory for the rest of the run.
    if not tester.load_semantic_search():
        logger.error("无法加载语义搜索功能，测试终止")
        sys.exit(1)

    # Performance measurements, then standalone encoding diagnostics.
    tester.test_semantic_search_performance()
    tester.test_dialogue_encoding()

    # Finish by emitting the recommendation report.
    tester.create_optimization_recommendations()

    logger.info("===== Semantic搜索功能测试结束 =====")


if __name__ == "__main__":
    main()