# -*- coding: utf-8 -*-
# @Function: 语义相似度检查器
# @Description: 连接数据库，使用BERT按语义依次排序相同category_id的问答对，检查语义特别相近的问题
# @Usage: 直接运行此脚本检查数据库中的重复或相似问题
# @Dependencies: Z_config.py, Z_db_manager.py, transformers, torch, sklearn

import os
import sys
import numpy as np
from typing import List, Tuple, Dict
from collections import defaultdict
import pandas as pd

# 导入项目模块
from Z_config import DB_CONFIG, RESERVOIR_MAPPING
from Z_db_manager import DBManager

# Import the BERT/ML stack; failure is tolerated here so the script can
# print installation guidance instead of crashing at import time.
try:
    from transformers import AutoTokenizer, AutoModel
    import torch
    from sklearn.metrics.pairwise import cosine_similarity
    # Flag consumed by SemanticChecker.__init__ and main() before any work starts.
    BERT_AVAILABLE = True
    print("成功导入所有必要的库")
except ImportError as e:
    print(f"警告: 导入库失败: {e}")
    print("请确保已安装: pip install transformers torch scikit-learn pandas openpyxl")
    BERT_AVAILABLE = False

class SemanticChecker:
    """Detect near-duplicate questions in the QA knowledge base.

    Questions are grouped by ``category_id``, embedded with a BERT model
    (the [CLS] token representation), and every in-category pair whose
    cosine similarity reaches ``similarity_threshold`` is reported.
    """

    def __init__(self, model_name='bert-base-chinese', similarity_threshold=0.75):
        """Initialize the semantic checker.

        Args:
            model_name (str): Hugging Face identifier of the BERT model.
            similarity_threshold (float): Minimum cosine similarity for a
                question pair to be reported; 0.75 keeps questions that
                differ only in numeric values from being flagged.

        Raises:
            ImportError: If the transformers/torch/sklearn stack failed to
                import at module load time.
            Exception: Re-raised from transformers when the model cannot be
                downloaded or loaded.
        """
        self.db_manager = DBManager()
        self.similarity_threshold = similarity_threshold

        if not BERT_AVAILABLE:
            raise ImportError("BERT相关库未安装或导入失败")

        print(f"正在加载BERT模型: {model_name}")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.model.to(self.device)
            print(f"BERT模型加载成功，使用设备: {self.device}")
        except Exception as e:
            print(f"BERT模型加载失败: {e}")
            print("尝试使用在线模型或检查网络连接")
            raise

    def get_bert_embedding(self, text: str) -> np.ndarray:
        """Return the BERT [CLS] embedding of ``text``.

        Args:
            text (str): Input text; tokenized with truncation at 512 tokens.

        Returns:
            np.ndarray: 1-D sentence embedding. On failure a zero vector
                sized to the model's hidden dimension is returned so
                callers can continue.
        """
        try:
            inputs = self.tokenizer(text, return_tensors='pt',
                                    truncation=True, padding=True, max_length=512)
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            # Inference only -- no gradient bookkeeping needed.
            with torch.no_grad():
                outputs = self.model(**inputs)
                # The [CLS] token embedding serves as the sentence representation.
                embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()

            return embeddings[0]
        except Exception as e:
            print(f"获取BERT嵌入失败: {e}")
            # Fallback zero vector: size read from the model config instead of a
            # hard-coded 768 so non-base models stay dimensionally consistent.
            hidden_size = getattr(getattr(self.model, 'config', None),
                                  'hidden_size', 768)
            return np.zeros(hidden_size)

    def get_all_qa_data(self) -> Dict[str, List[Tuple[str, str]]]:
        """Fetch all QA rows from the database, grouped by ``category_id``.

        Returns:
            Dict[str, List[Tuple[str, str]]]: Mapping of category_id to its
                list of ``(question, answer)`` pairs; empty dict on failure.
        """
        query_sql = """
        SELECT question, answer, category_id 
        FROM qa_knowledge 
        ORDER BY category_id, question
        """

        try:
            self.db_manager.connect()
            with self.db_manager.conn.cursor() as cursor:
                cursor.execute(query_sql)
                results = cursor.fetchall()

                # Group by category_id; the answer column may be a JSON
                # object with an 'answer' key or a plain value.
                grouped_data = defaultdict(list)
                for question, answer_json, category_id in results:
                    answer = answer_json.get('answer', '') if isinstance(answer_json, dict) else str(answer_json)
                    grouped_data[category_id].append((question, answer))

                return dict(grouped_data)
        except Exception as e:
            print(f"获取QA数据失败: {e}")
            return {}
        finally:
            # Always release the connection, even on query failure.
            self.db_manager.close()

    def find_similar_questions(self, questions: List[str]) -> List[Tuple[int, int, float]]:
        """Find question pairs whose cosine similarity meets the threshold.

        Args:
            questions (List[str]): Questions to compare pairwise.

        Returns:
            List[Tuple[int, int, float]]: ``(index1, index2, similarity)``
                tuples sorted by similarity, highest first.
        """
        if len(questions) < 2:
            return []

        print(f"正在计算 {len(questions)} 个问题的BERT嵌入...")

        # Embed every question (progress line every 10 items).
        embeddings = []
        for i, question in enumerate(questions):
            if i % 10 == 0:
                print(f"进度: {i+1}/{len(questions)}")
            embeddings.append(self.get_bert_embedding(question))

        embeddings = np.array(embeddings)

        print("正在计算相似度矩阵...")
        similarity_matrix = cosine_similarity(embeddings)

        # Walk the upper triangle so each unordered pair is seen once; cast
        # to a plain Python float so downstream dicts/Excel export don't
        # carry numpy scalar types.
        similar_pairs = []
        for i in range(len(questions)):
            for j in range(i + 1, len(questions)):
                similarity = float(similarity_matrix[i][j])
                if similarity >= self.similarity_threshold:
                    similar_pairs.append((i, j, similarity))

        # Most similar pairs first.
        similar_pairs.sort(key=lambda pair: pair[2], reverse=True)

        return similar_pairs

    def check_category_similarity(self, category_id: str, qa_pairs: List[Tuple[str, str]]) -> Dict:
        """Check question similarity within a single category.

        Args:
            category_id (str): Category identifier.
            qa_pairs (List[Tuple[str, str]]): ``(question, answer)`` pairs.

        Returns:
            Dict: Result with keys ``category_id``, ``total_questions``,
                ``similar_pairs`` (list of dicts) and ``similarity_stats``.
        """
        questions = [qa[0] for qa in qa_pairs]

        print(f"\n检查类别: {category_id}")
        print(f"问题数量: {len(questions)}")

        if len(questions) < 2:
            print("问题数量少于2个，跳过相似度检查")
            return {
                'category_id': category_id,
                'total_questions': len(questions),
                'similar_pairs': [],
                'similarity_stats': {}
            }

        similar_pairs = self.find_similar_questions(questions)

        # Aggregate statistics over the pairs found.
        similarity_stats = {
            'total_pairs_checked': len(questions) * (len(questions) - 1) // 2,
            'similar_pairs_found': len(similar_pairs),
            'max_similarity': max((pair[2] for pair in similar_pairs), default=0),
            'avg_similarity': float(np.mean([pair[2] for pair in similar_pairs])) if similar_pairs else 0
        }

        # Console report, capped at the 10 most similar pairs.
        if similar_pairs:
            print(f"发现 {len(similar_pairs)} 对相似问题:")
            for i, (idx1, idx2, similarity) in enumerate(similar_pairs[:10]):
                print(f"\n  {i+1}. 相似度: {similarity:.4f}")
                print(f"     问题1: {questions[idx1]}")
                print(f"     问题2: {questions[idx2]}")
                if i == 9 and len(similar_pairs) > 10:
                    print(f"     ... 还有 {len(similar_pairs) - 10} 对相似问题")
        else:
            print("未发现高度相似的问题")

        return {
            'category_id': category_id,
            'total_questions': len(questions),
            'similar_pairs': [
                {
                    'question1': questions[idx1],
                    'question2': questions[idx2],
                    'similarity': similarity,
                    'answer1': qa_pairs[idx1][1],
                    'answer2': qa_pairs[idx2][1]
                }
                for idx1, idx2, similarity in similar_pairs
            ],
            'similarity_stats': similarity_stats
        }

    def check_all_categories(self) -> Dict[str, Dict]:
        """Run the similarity check over every category in the database.

        Returns:
            Dict[str, Dict]: Mapping of category_id to its check result; a
                failed category maps to ``{'category_id': ..., 'error': ...}``.
        """
        print("开始检查所有类别的问题相似度...")
        print(f"相似度阈值: {self.similarity_threshold}")
        print("=" * 60)

        all_data = self.get_all_qa_data()

        if not all_data:
            print("未获取到任何数据")
            return {}

        print(f"共找到 {len(all_data)} 个类别")

        # Per-category checks; one category failing must not abort the rest.
        results = {}
        for category_id, qa_pairs in all_data.items():
            reservoir_name = self.get_reservoir_name(category_id)
            print(f"\n水库: {reservoir_name} (ID: {category_id})")

            try:
                results[category_id] = self.check_category_similarity(category_id, qa_pairs)
            except Exception as e:
                print(f"检查类别 {category_id} 时出错: {e}")
                results[category_id] = {
                    'category_id': category_id,
                    'error': str(e)
                }

        return results

    def get_reservoir_name(self, category_id: str) -> str:
        """Resolve a category_id back to its reservoir name.

        Args:
            category_id (str): Category identifier.

        Returns:
            str: The reservoir name from RESERVOIR_MAPPING, or a placeholder
                containing the id when no mapping exists.
        """
        # RESERVOIR_MAPPING maps name -> id, so do a reverse lookup.
        for name, cid in RESERVOIR_MAPPING.items():
            if cid == category_id:
                return name
        return f"未知水库({category_id})"

    @staticmethod
    def _truncate(text: str, limit: int = 100) -> str:
        """Shorten ``text`` to ``limit`` characters, appending '...' if cut."""
        return text[:limit] + '...' if len(text) > limit else text

    def export_results(self, results: Dict[str, Dict], output_file: str = "similarity_check_results.xlsx"):
        """Export check results to an Excel workbook.

        Args:
            results (Dict[str, Dict]): Results from check_all_categories().
            output_file (str): Destination .xlsx path.
        """
        try:
            export_data = []

            for category_id, result in results.items():
                # Skip categories whose check itself failed.
                if 'error' in result:
                    continue

                reservoir_name = self.get_reservoir_name(category_id)

                if result['similar_pairs']:
                    for pair in result['similar_pairs']:
                        export_data.append({
                            '水库名称': reservoir_name,
                            '类别ID': category_id,
                            '相似度': pair['similarity'],
                            '问题1': pair['question1'],
                            '问题2': pair['question2'],
                            '答案1': self._truncate(pair['answer1']),
                            '答案2': self._truncate(pair['answer2'])
                        })
                else:
                    # Emit a placeholder row so clean categories still appear.
                    export_data.append({
                        '水库名称': reservoir_name,
                        '类别ID': category_id,
                        '相似度': 0,
                        '问题1': '无相似问题',
                        '问题2': '',
                        '答案1': '',
                        '答案2': ''
                    })

            df = pd.DataFrame(export_data)
            df.to_excel(output_file, index=False, engine='openpyxl')
            print(f"\n结果已导出到: {output_file}")

        except Exception as e:
            print(f"导出结果失败: {e}")

    def print_summary(self, results: Dict[str, Dict]):
        """Print an aggregate summary of the check results.

        Args:
            results (Dict[str, Dict]): Results from check_all_categories().
        """
        print("\n" + "=" * 60)
        print("检查结果摘要")
        print("=" * 60)

        total_categories = len(results)
        categories_with_similar = 0
        total_similar_pairs = 0

        for category_id, result in results.items():
            if 'error' not in result and result['similar_pairs']:
                categories_with_similar += 1
                total_similar_pairs += len(result['similar_pairs'])

        print(f"总类别数: {total_categories}")
        print(f"有相似问题的类别: {categories_with_similar}")
        print(f"总相似问题对数: {total_similar_pairs}")
        print(f"相似度阈值: {self.similarity_threshold}")

        if categories_with_similar > 0:
            print("\n建议检查这些相似问题，可能存在重复或需要合并")
        else:
            print("\n所有类别的问题都没有高度相似的情况")

def main():
    """Entry point: optionally read a similarity threshold from argv[1],
    run the check over all categories, print a summary, and export results.
    """
    print("QA数据库语义相似度检查工具")
    print("=" * 50)

    # Bail out early when the ML stack failed to import.
    if not BERT_AVAILABLE:
        print("缺少必要的依赖库，程序退出")
        return

    try:
        # Default threshold; an invalid CLI value falls back to this.
        threshold = 0.75
        if len(sys.argv) > 1:
            try:
                threshold = float(sys.argv[1])
                print(f"使用自定义相似度阈值: {threshold}")
            except ValueError:
                print(f"无效的相似度阈值，使用默认值: {threshold}")

        checker = SemanticChecker(similarity_threshold=threshold)

        results = checker.check_all_categories()
        checker.print_summary(results)

        if results:
            checker.export_results(results)

    except Exception as e:
        print(f"程序执行失败: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()